Commit e966ec57 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: general update

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

 - Add console_unblank in machine_{restart,halt,power_off} to get
   all messages on the screen.
 - Set console_irq to -1 if condev= parameter is present.
 - Fix write_trylock for 64 bit.
 - Fix svc restarting.
 - System call number on 64 bit is an int. Fix compare in entry64.S.
 - Fix tlb flush problem.
 - Use the idte instruction to flush tlbs of a particular mm.
 - Fix ptrace.
 - Add fadvise64_64 system call wrapper (a sketch of the 31 bit calling
   convention follows after this log).
 - Fix pfault handling.
 - Do not clobber _PAGE_INVALID_NONE pages in pte_wrprotect.
 - Fix siginfo_t size problem (needs to be 128 for s390x, not 136).
 - Avoid direct assignment to tsk->state, use __set_task_state.
 - Always make any pending restarted system call return -EINTR.
 - Add panic_on_oops.
 - Display symbol for psw address in show_trace.
 - Don't discard sections .exit.text, .exit.data and .eh_frame,
   otherwise stabs information for kerntypes will get lost. 
 - Add memory clobber to assembler inline in ip_fast_checksum for gcc 3.3.
 - Fix softirq_pending calls for the current cpu (cpu == smp_processor_id()).
 - Remove BUG_ON in irq_enter. Two irq_enters are possible.
parent 0da570b7
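
The new s390_fadvise64_64 wrapper (see the sys_s390.c, syscalls.S and unistd.h
hunks below) exists because a 31 bit system call can pass at most five register
arguments, while fadvise64_64 needs six words: fd, two 64 bit values and the
advice. User space therefore passes a pointer to an argument block. A minimal,
hypothetical caller sketch; the function name do_fadvise64_64 and the raw
syscall(264, ...) invocation are illustrative only and not part of the patch:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Argument block layout taken from the s390_fadvise64_64 wrapper below. */
    struct fadvise64_64_args {
            int fd;
            long long offset;
            long long len;
            int advice;
    };

    /* Hypothetical 31 bit caller: pack the six argument words into one block
     * and pass only its address, since only five syscall arguments fit into
     * registers.  264 is __NR_fadvise64_64 as defined in the unistd.h hunk. */
    static long do_fadvise64_64(int fd, long long offset, long long len, int advice)
    {
            struct fadvise64_64_args args = { fd, offset, len, advice };

            return syscall(264, &args);
    }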
@@ -29,6 +29,7 @@ CONFIG_EPOLL=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set

 #
 # Loadable module support
@@ -109,8 +110,8 @@ CONFIG_SCSI_LOGGING=y
 #
 # SCSI low-level drivers
 #
-# CONFIG_SCSI_AIC7XXX is not set
 # CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_SATA is not set
 # CONFIG_SCSI_EATA_PIO is not set
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_ZFCP=y
@@ -317,6 +318,21 @@ CONFIG_QETH=y
 # CONFIG_QETH_PERF_STATS is not set
 CONFIG_CCWGROUP=y
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set

 #
 # File systems
 #
@@ -358,8 +374,7 @@ CONFIG_PROC_KCORE=y
 # CONFIG_DEVFS_FS is not set
 CONFIG_DEVPTS_FS=y
 # CONFIG_DEVPTS_FS_XATTR is not set
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLBFS is not set
+CONFIG_TMPFS=y
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
@@ -385,6 +400,7 @@ CONFIG_RAMFS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V4 is not set
@@ -423,6 +439,11 @@ CONFIG_MSDOS_PARTITION=y
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_EFI_PARTITION is not set

+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
 #
 # Kernel hacking
 #
......
@@ -249,6 +249,8 @@ sysc_sigpending:
        l       %r1,BASED(.Ldo_signal)
        basr    %r14,%r1          # call do_signal
        stnsm   24(%r15),0xfc     # disable I/O and ext. interrupts
+       tm      __TI_flags+3(%r9),_TIF_RESTART_SVC
+       bo      BASED(sysc_restart)
        b       BASED(sysc_leave) # out of here, do NOT recheck
 #
@@ -258,7 +260,7 @@ sysc_restart:
        ni      __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        stosm   24(%r15),0x03     # reenable interrupts
        l       %r7,SP_R2(%r15)   # load new svc number
-       sla     %r2,2
+       sla     %r7,2
        mvc     SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
        lm      %r2,%r6,SP_R2(%r15)   # load svc arguments
        b       BASED(sysc_do_restart) # restart svc
@@ -541,7 +543,7 @@ io_preempt:
 io_resume_loop:
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bno     BASED(io_leave)
-       mvc     __TI_precount(4,%r9),.Lc_pactive
+       mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)
 # hmpf, we are on the async. stack but to call schedule
 # we have to move the interrupt frame to the process stack
        l       %r1,SP_R15(%r15)
......
@@ -169,9 +169,10 @@ system_call:
        slag    %r7,%r7,2         # *4 and test for svc 0
        jnz     sysc_do_restart
        # svc 0: system call number in %r1
-       clg     %r1,.Lnr_syscalls-.Lconst(%r14)
+       cl      %r1,.Lnr_syscalls-.Lconst(%r14)
        jnl     sysc_do_restart
-       slag    %r7,%r1,2         # svc 0: system call number in %r1
+       lgfr    %r7,%r1           # clear high word in r1
+       slag    %r7,%r7,2         # svc 0: system call number in %r1
 sysc_do_restart:
        larl    %r10,sys_call_table
 #ifdef CONFIG_S390_SUPPORT
@@ -235,16 +236,18 @@ sysc_sigpending:
        sgr     %r3,%r3           # clear *oldset
        brasl   %r14,do_signal    # call do_signal
        stnsm   48(%r15),0xfc     # disable I/O and ext. interrupts
+       tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
+       jo      sysc_restart
        j       sysc_leave        # out of here, do NOT recheck
 #
 # _TIF_RESTART_SVC is set, set up registers and restart svc
 #
 sysc_restart:
-       ni      __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+       ni      __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        stosm   48(%r15),0x03     # reenable interrupts
        lg      %r7,SP_R2(%r15)   # load new svc number
-       slag    %r7,%r7,3         # *8
+       slag    %r7,%r7,2         # *4
        mvc     SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
        lmg     %r2,%r6,SP_R2(%r15)   # load svc arguments
        j       sysc_do_restart   # restart svc
@@ -503,7 +506,7 @@ pgm_svcstd:
        larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
 pgm_svcper_noemu:
 #endif
-       tm      __TI_flags+3(%r9),_TIF_SYSCALL_TRACE
+       tm      __TI_flags+7(%r9),_TIF_SYSCALL_TRACE
        lgf     %r8,0(%r7,%r10)   # load address of system call routine
        jo      pgm_tracesys
        basr    %r14,%r8          # call sys_xxxx
@@ -512,7 +515,7 @@ pgm_svcper_noemu:
 # changing anything here !!
 pgm_svcret:
-       tm      __TI_flags+3(%r9),_TIF_SIGPENDING
+       tm      __TI_flags+7(%r9),_TIF_SIGPENDING
        jo      pgm_svcper_nosig
        la      %r2,SP_PTREGS(%r15) # load pt_regs
        sgr     %r3,%r3           # clear *oldset
......
@@ -569,6 +569,19 @@ startup:basr %r13,0  # get base
        oi      3(%r12),16        # set MVPG flag
 .Lchkmvpg:
+
+#
+# find out if we have the IDTE instruction
+#
+       mvc     __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
+       .long   0xb2b10000        # store facility list
+       tm      0xc8,0x08         # check bit for clearing-by-ASCE
+       bno     .Lchkidte-.LPG1(%r13)
+       lhi     %r1,2094
+       lhi     %r2,0
+       .long   0xb98e2001
+       oi      3(%r12),0x80      # set IDTE flag
+.Lchkidte:
        lpsw    .Lentry-.LPG1(13) # jump to _stext in primary-space,
                                  # virtual and never return ...
        .align  8
@@ -593,6 +606,7 @@ startup:basr %r13,0  # get base
 .Lpcfpu:.long  0x00080000,0x80000000 + .Lchkfpu
 .Lpccsp:.long  0x00080000,0x80000000 + .Lchkcsp
 .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
+.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
 .Lmemsize:.long memory_size
 .Lmchunk:.long memory_chunk
 .Lmflags:.long machine_flags
......
@@ -582,6 +582,20 @@ startup:basr %r13,0  # get base
        mvc     __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13)
 0:
+
+#
+# find out if we have the IDTE instruction
+#
+       la      %r1,0f-.LPG1(%r13) # set program check address
+       stg     %r1,__LC_PGM_NEW_PSW+8
+       .long   0xb2b10000        # store facility list
+       tm      0xc8,0x08         # check bit for clearing-by-ASCE
+       bno     0f-.LPG1(%r13)
+       lhi     %r1,2094
+       lhi     %r2,0
+       .long   0xb98e2001
+       oi      7(%r12),0x80      # set IDTE flag
+0:
        lpswe   .Lentry-.LPG1(13) # jump to _stext in primary-space,
                                  # virtual and never return ...
        .align  16
......
@@ -130,7 +130,11 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
 	struct user *dummy = NULL;
 	addr_t offset, tmp;
-	if ((addr & __ADDR_MASK) || addr > sizeof(struct user) - __ADDR_MASK)
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
 		return -EIO;
 	if (addr <= (addr_t) &dummy->regs.orig_gpr2) {
@@ -138,6 +142,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
 		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
 		 */
 		tmp = *(addr_t *)((addr_t) __KSTK_PTREGS(child) + addr);
+		if (addr == (addr_t) &dummy->regs.psw.mask)
+			/* Remove per bit from user psw. */
+			tmp &= ~PSW_MASK_PER;
 	} else if (addr >= (addr_t) &dummy->regs.fp_regs &&
 		   addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
@@ -173,7 +180,11 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	struct user *dummy = NULL;
 	addr_t offset;
-	if ((addr & __ADDR_MASK) || addr > sizeof(struct user) - __ADDR_MASK)
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell indeed...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
 		return -EIO;
 	if (addr <= (addr_t) &dummy->regs.orig_gpr2) {
@@ -258,7 +269,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_PEEKUSR_AREA:
 	case PTRACE_POKEUSR_AREA:
-		if (!copy_from_user(&parea, (void *) addr, sizeof(parea)))
+		if (copy_from_user(&parea, (void *) addr, sizeof(parea)))
 			return -EFAULT;
 		addr = parea.kernel_addr;
 		data = parea.process_addr;
@@ -266,8 +277,12 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 		while (copied < parea.len) {
 			if (request == PTRACE_PEEKUSR_AREA)
 				ret = peek_user(child, addr, data);
-			else
-				ret = poke_user(child, addr, data);
+			else {
+				addr_t tmp;
+				if (get_user (tmp, (addr_t *) data))
+					return -EFAULT;
+				ret = poke_user(child, addr, tmp);
+			}
 			if (ret)
 				return ret;
 			addr += sizeof(unsigned long);
@@ -390,7 +405,7 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 		if ((tmp & ~PSW32_MASK_CC) != PSW32_USER_BITS)
 			/* Invalid psw mask. */
 			return -EINVAL;
-		__KSTK_PTREGS(child)->psw.mask = PSW_USER_BITS |
+		__KSTK_PTREGS(child)->psw.mask = PSW_USER32_BITS |
 			((tmp & PSW32_MASK_CC) << 32);
 	} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 		/* Build a 64 bit psw address from 31 bit address. */
@@ -484,7 +499,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_PEEKUSR_AREA:
 	case PTRACE_POKEUSR_AREA:
-		if (!copy_from_user(&parea, (void *) addr, sizeof(parea)))
+		if (copy_from_user(&parea, (void *) addr, sizeof(parea)))
 			return -EFAULT;
 		addr = parea.kernel_addr;
 		data = parea.process_addr;
@@ -492,8 +507,12 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 		while (copied < parea.len) {
 			if (request == PTRACE_PEEKUSR_AREA)
 				ret = peek_user_emu31(child, addr, data);
-			else
-				ret = poke_user_emu31(child, addr, data);
+			else {
+				__u32 tmp;
+				if (get_user (tmp, (__u32 *) data))
+					return -EFAULT;
+				ret = poke_user_emu31(child, addr, tmp);
+			}
 			if (ret)
 				return ret;
 			addr += sizeof(unsigned int);
......
@@ -64,14 +64,14 @@ void __down(struct semaphore * sem)
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
+	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	add_wait_queue_exclusive(&sem->wait, &wait);
 	while (__sem_update_count(sem, -1) <= 0) {
 		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	}
 	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
+	__set_task_state(tsk, TASK_RUNNING);
 	wake_up(&sem->wait);
 }
@@ -87,7 +87,7 @@ int __down_interruptible(struct semaphore * sem)
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
+	__set_task_state(tsk, TASK_INTERRUPTIBLE);
 	add_wait_queue_exclusive(&sem->wait, &wait);
 	while (__sem_update_count(sem, -1) <= 0) {
 		if (signal_pending(current)) {
@@ -96,10 +96,10 @@ int __down_interruptible(struct semaphore * sem)
 			break;
 		}
 		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
+		set_task_state(tsk, TASK_INTERRUPTIBLE);
 	}
 	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
+	__set_task_state(tsk, TASK_RUNNING);
 	wake_up(&sem->wait);
 	return retval;
 }
......
@@ -158,8 +158,10 @@ static int __init condev_setup(char *str)
 	int vdev;
 	vdev = simple_strtoul(str, &str, 0);
-	if (vdev >= 0 && vdev < 65536)
+	if (vdev >= 0 && vdev < 65536) {
 		console_device = vdev;
+		console_irq = -1;
+	}
 	return 1;
 }
@@ -287,6 +289,7 @@ void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
 void machine_restart(char *command)
 {
+	console_unblank();
 	_machine_restart(command);
 }
@@ -294,6 +297,7 @@ EXPORT_SYMBOL(machine_restart);
 void machine_halt(void)
 {
+	console_unblank();
 	_machine_halt();
 }
@@ -301,6 +305,7 @@ EXPORT_SYMBOL(machine_halt);
 void machine_power_off(void)
 {
+	console_unblank();
 	_machine_power_off();
 }
......
@@ -167,6 +167,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs *sregs)
 {
 	int err;
+	/* Alwys make any pending restarted system call return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	err = __copy_from_user(regs, &sregs->regs, sizeof(_s390_regs_common));
 	regs->psw.mask = PSW_USER_BITS | (regs->psw.mask & PSW_MASK_CC);
 	regs->psw.addr |= PSW_ADDR_AMODE;
......
@@ -325,3 +325,39 @@ asmlinkage int s390x_personality(unsigned long personality)
 	return ret;
 }
 #endif /* CONFIG_ARCH_S390X */
+
+/*
+ * Wrapper function for sys_fadvise64/fadvise64_64
+ */
+#ifndef CONFIG_ARCH_S390X
+extern asmlinkage long sys_fadvise64(int, loff_t, size_t, int);
+
+asmlinkage long
+s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
+{
+	return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
+			     len, advice);
+}
+
+#endif
+
+extern asmlinkage long sys_fadvise64_64(int, loff_t, loff_t, int);
+
+struct fadvise64_64_args {
+	int fd;
+	long long offset;
+	long long len;
+	int advice;
+};
+
+asmlinkage long
+s390_fadvise64_64(struct fadvise64_64_args *args)
+{
+	struct fadvise64_64_args a;
+
+	if ( copy_from_user(&a, args, sizeof(a)) )
+		return -EFAULT;
+	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+}
@@ -15,7 +15,7 @@ SYSCALL(sys_read,sys_read,sys32_read_wrapper)
 SYSCALL(sys_write,sys_write,sys32_write_wrapper)
 SYSCALL(sys_open,sys_open,sys32_open_wrapper)		/* 5 */
 SYSCALL(sys_close,sys_close,sys32_close_wrapper)
-SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_ni_syscall)
+SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
 SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
 SYSCALL(sys_link,sys_link,sys32_link_wrapper)
 SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper)	/* 10 */
@@ -261,7 +261,7 @@ SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
 SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)	/* 250 */
 SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
 SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(sys_fadvise64,sys_fadvise64,sys_ni_syscall)
+SYSCALL(s390_fadvise64,sys_fadvise64_64,sys_ni_syscall)
 SYSCALL(sys_timer_create,sys_timer_create,sys_ni_syscall)
 SYSCALL(sys_timer_settime,sys_timer_settime,sys_ni_syscall)	/* 255 */
 SYSCALL(sys_timer_gettime,sys_timer_gettime,sys_ni_syscall)
@@ -272,3 +272,4 @@ SYSCALL(sys_clock_gettime,sys_clock_gettime,sys_ni_syscall)	/* 260 */
 SYSCALL(sys_clock_getres,sys_clock_getres,sys_ni_syscall)
 SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys_ni_syscall)
 NI_SYSCALL							/* reserved for vserver */
+SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys_ni_syscall)
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -156,9 +157,10 @@ void show_registers(struct pt_regs *regs)
 	int i;
 	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
-	printk("%s PSW : %p %p\n",
+	printk("%s PSW : %p %p",
 	       mode, (void *) regs->psw.mask,
 	       (void *) regs->psw.addr);
+	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
 	printk("%s GPRS: " FOURLONG, mode,
 	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
 	printk("           " FOURLONG,
@@ -250,6 +252,10 @@ void die(const char * str, struct pt_regs * regs, long err)
 	show_regs(regs);
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception: panic_on_oops");
 	do_exit(SIGSEGV);
 }
......
@@ -117,10 +117,7 @@ SECTIONS
   /* Sections to be discarded */
   /DISCARD/ : {
-	*(.exit.text)
-	*(.exit.data)
 	*(.exitcall.exit)
-	*(.eh_frame)
 	}
   /* Stabs debugging sections.  */
......
@@ -538,8 +538,6 @@ asmlinkage void
 pfault_interrupt(struct pt_regs *regs, __u16 error_code)
 {
 	struct task_struct *tsk;
-	wait_queue_head_t queue;
-	wait_queue_head_t *qp;
 	__u16 subcode;
 	/*
@@ -553,46 +551,34 @@ pfault_interrupt(struct pt_regs *regs, __u16 error_code)
 		return;
 	/*
-	 * Get the token (= address of kernel stack of affected task).
+	 * Get the token (= address of the task structure of the affected task).
 	 */
 	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
-	/*
-	 * We got all needed information from the lowcore and can
-	 * now safely switch on interrupts.
-	 */
-	if (regs->psw.mask & PSW_MASK_PSTATE)
-		local_irq_enable();
-
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
-		qp = (wait_queue_head_t *)
-			xchg(&tsk->thread.pfault_wait, -1);
-		if (qp != NULL) {
+		if (xchg(&tsk->thread.pfault_wait, -1) != 0) {
 			/* Initial interrupt was faster than the completion
 			 * interrupt. pfault_wait is valid. Set pfault_wait
 			 * back to zero and wake up the process. This can
 			 * safely be done because the task is still sleeping
 			 * and can't procude new pfaults. */
-			tsk->thread.pfault_wait = 0ULL;
-			wake_up(qp);
+			tsk->thread.pfault_wait = 0;
+			wake_up_process(tsk);
 		}
 	} else {
 		/* signal bit not set -> a real page is missing. */
-		init_waitqueue_head (&queue);
-		qp = (wait_queue_head_t *)
-			xchg(&tsk->thread.pfault_wait, (addr_t) &queue);
-		if (qp != NULL) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (xchg(&tsk->thread.pfault_wait, 1) != 0) {
 			/* Completion interrupt was faster than the initial
 			 * interrupt (swapped in a -1 for pfault_wait). Set
 			 * pfault_wait back to zero and exit. This can be
 			 * done safely because tsk is running in kernel
 			 * mode and can't produce new pfaults. */
-			tsk->thread.pfault_wait = 0ULL;
-		}
-		/* go to sleep */
-		wait_event(queue, tsk->thread.pfault_wait == 0ULL);
+			tsk->thread.pfault_wait = 0;
+			set_task_state(tsk, TASK_RUNNING);
+		} else
+			set_tsk_need_resched(tsk);
 	}
 }
 #endif
......
@@ -526,10 +526,10 @@ __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
  * Find-bit routines..
  */
 static inline int
-find_first_zero_bit(unsigned long * addr, unsigned size)
+find_first_zero_bit(unsigned long * addr, unsigned int size)
 {
 	unsigned long cmp, count;
-	int res;
+	unsigned int res;
 	if (!size)
 		return 0;
@@ -565,10 +565,10 @@ find_first_zero_bit(unsigned long * addr, unsigned size)
 }
 static inline int
-find_first_bit(unsigned long * addr, unsigned size)
+find_first_bit(unsigned long * addr, unsigned int size)
 {
 	unsigned long cmp, count;
-	int res;
+	unsigned int res;
 	if (!size)
 		return 0;
@@ -1022,7 +1022,7 @@ extern inline int ffs (int x)
 /*
  * fls: find last bit set.
  */
-extern __inline__ int fls(int x)
+static __inline__ int fls(int x)
 {
 	int r = 32;
@@ -1095,10 +1095,10 @@ extern __inline__ int fls(int x)
 #ifndef __s390x__
 static inline int
-ext2_find_first_zero_bit(void *vaddr, unsigned size)
+ext2_find_first_zero_bit(void *vaddr, unsigned int size)
 {
 	unsigned long cmp, count;
-	int res;
+	unsigned int res;
 	if (!size)
 		return 0;
@@ -1135,12 +1135,12 @@ ext2_find_first_zero_bit(void *vaddr, unsigned size)
 }
 static inline int
-ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
+ext2_find_next_zero_bit(void *vaddr, unsigned int size, unsigned offset)
 {
 	unsigned long *addr = vaddr;
 	unsigned long *p = addr + (offset >> 5);
 	unsigned long word, reg;
-	int bit = offset & 31UL, res;
+	unsigned int bit = offset & 31UL, res;
 	if (offset >= size)
 		return size;
......
 #ifndef _S390_BUG_H
 #define _S390_BUG_H
+#include <linux/kernel.h>
+
 #define BUG() do { \
         printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
         __asm__ __volatile__(".long 0"); \
......
@@ -14,7 +14,7 @@
 #ifdef __GNUC__
 #ifdef __s390x__
-static __inline__ __const__ __u64 ___arch__swab64p(__u64 *x)
+static __inline__ __u64 ___arch__swab64p(__u64 *x)
 {
 	__u64 result;
@@ -24,7 +24,7 @@ static __inline__ __const__ __u64 ___arch__swab64p(__u64 *x)
 	return result;
 }
-static __inline__ __const__ __u64 ___arch__swab64(__u64 x)
+static __inline__ __u64 ___arch__swab64(__u64 x)
 {
 	__u64 result;
@@ -40,7 +40,7 @@ static __inline__ void ___arch__swab64s(__u64 *x)
 }
 #endif /* __s390x__ */
-static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
+static __inline__ __u32 ___arch__swab32p(__u32 *x)
 {
 	__u32 result;
@@ -58,7 +58,7 @@ static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
 	return result;
 }
-static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
+static __inline__ __u32 ___arch__swab32(__u32 x)
 {
 #ifndef __s390x__
 	return ___arch__swab32p(&x);
@@ -77,7 +77,7 @@ static __inline__ void ___arch__swab32s(__u32 *x)
 	*x = ___arch__swab32p(x);
 }
-static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
+static __inline__ __u16 ___arch__swab16p(__u16 *x)
 {
 	__u16 result;
@@ -93,7 +93,7 @@ static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
 	return result;
 }
-static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
+static __inline__ __u16 ___arch__swab16(__u16 x)
 {
 	return ___arch__swab16p(&x);
 }
......
@@ -70,7 +70,7 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
 	__asm__ __volatile__ (
 		"0:  cksm %0,%1\n"    /* do checksum on longs */
 		"    jo   0b\n"
-		: "+&d" (sum), "+&a" (rp) : : "cc" );
+		: "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
 #else /* __s390x__ */
 	__asm__ __volatile__ (
 		"    lgr  2,%1\n"     /* address in gpr 2 */
@@ -79,7 +79,7 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
 		"    jo   0b\n"
 		: "+&d" (sum)
 		: "d" (buff), "d" (len)
-		: "cc", "2", "3" );
+		: "cc", "memory", "2", "3" );
 #endif /* __s390x__ */
 	return sum;
 }
@@ -165,7 +165,7 @@ ip_fast_csum(unsigned char *iph, unsigned int ihl)
 		"    sr   %0,%0\n"    /* set sum to zero */
 		"0:  cksm %0,%1\n"    /* do checksum on longs */
 		"    jo   0b\n"
-		: "=&d" (sum), "+&a" (rp) : : "cc" );
+		: "=&d" (sum), "+&a" (rp) : : "cc", "memory" );
 #else /* __s390x__ */
 	__asm__ __volatile__ (
 		"    slgr %0,%0\n"    /* set sum to zero */
@@ -175,7 +175,7 @@ ip_fast_csum(unsigned char *iph, unsigned int ihl)
 		"    jo   0b\n"
 		: "=&d" (sum)
 		: "d" (iph), "d" (ihl*4)
-		: "cc", "2", "3" );
+		: "cc", "memory", "2", "3" );
 #endif /* __s390x__ */
 	return csum_fold(sum);
 }
......
@@ -25,9 +25,17 @@ typedef struct {
 	unsigned int __softirq_pending;
 } irq_cpustat_t;
-#define softirq_pending(cpu)	(lowcore_ptr[(cpu)]->softirq_pending)
 #define local_softirq_pending() (S390_lowcore.softirq_pending)
+/* this is always called with cpu == smp_processor_id() at the moment */
+static inline __u32
+softirq_pending(unsigned int cpu)
+{
+	if (cpu == smp_processor_id())
+		return local_softirq_pending();
+	return lowcore_ptr[cpu]->softirq_pending;
+}
+
 #define __ARCH_IRQ_STAT
 /*
@@ -42,12 +50,12 @@ typedef struct {
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x00010000
+ * HARDIRQ_MASK: 0x00ff0000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
-#define HARDIRQ_BITS	1
+#define HARDIRQ_BITS	8
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
@@ -81,7 +89,6 @@ typedef struct {
 #define irq_enter()							\
 do {									\
-	BUG_ON( hardirq_count() );					\
 	(preempt_count() += HARDIRQ_OFFSET);				\
 } while(0)
......
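
The hardirq.h hunk above widens HARDIRQ_BITS from 1 to 8, which is what makes
the removed BUG_ON(hardirq_count()) unnecessary: nested irq_enter() calls now
simply increment the hardirq byte of preempt_count. The following stand-alone
arithmetic sketch (plain user-space C, not kernel code) mirrors the masks
described in the comment above:

    #include <stdio.h>

    #define PREEMPT_BITS    8
    #define SOFTIRQ_BITS    8
    #define HARDIRQ_BITS    8

    #define PREEMPT_SHIFT   0
    #define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)
    #define HARDIRQ_MASK    (((1UL << HARDIRQ_BITS) - 1) << HARDIRQ_SHIFT)

    int main(void)
    {
            unsigned long preempt_count = 0;

            preempt_count += HARDIRQ_OFFSET;        /* first irq_enter() */
            preempt_count += HARDIRQ_OFFSET;        /* nested irq_enter() */

            /* prints 2: two interrupt entries are now representable */
            printf("hardirq nesting: %lu\n",
                   (preempt_count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
            printf("HARDIRQ_MASK: 0x%08lx\n", HARDIRQ_MASK);   /* 0x00ff0000 */
            return 0;
    }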
@@ -55,11 +55,21 @@
 	 ((nr)   << _IOC_NRSHIFT) | \
 	 ((size) << _IOC_SIZESHIFT))
+/* provoke compile error for invalid uses of size argument */
+extern unsigned long __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+	((sizeof(t) == sizeof(t[1]) && \
+	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+	  sizeof(t) : __invalid_size_argument_for_IOC)
+
 /* used to create numbers */
 #define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOR_BAD(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW_BAD(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR_BAD(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
 /* used to decode ioctl numbers.. */
 #define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
......
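
The _IOC_TYPECHECK addition above turns a misused size argument into a build
failure while correct uses still fold to a compile-time constant. A
hypothetical driver header as illustration (the FOO_* names and struct
foo_config are made up, not from this patch):

    #include <linux/ioctl.h>

    struct foo_config {
            int rate;
            int mode;
    };

    /* Correct: the third argument is the *type* being transferred, so
     * _IOC_TYPECHECK evaluates to sizeof(struct foo_config). */
    #define FOO_SET_CONFIG  _IOW('F', 0x01, struct foo_config)

    /* A misuse like the following, passing a value instead of a type, no
     * longer builds with the checked _IOW: the sizeof(...) expression cannot
     * be subscripted, and an oversized type would reference the undefined
     * symbol __invalid_size_argument_for_IOC.  The _IOW_BAD variant keeps the
     * old, unchecked behaviour for code that cannot be converted yet.
     *
     * #define FOO_SET_CONFIG_BROKEN  _IOW('F', 0x02, sizeof(struct foo_config))
     */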
@@ -14,7 +14,7 @@
  */
 #define init_new_context(tsk,mm)        0
-#define destroy_context(mm)             flush_tlb_mm(mm)
+#define destroy_context(mm)             do { } while (0)
 static inline void enter_lazy_tlb(struct mm_struct *mm,
                                   struct task_struct *tsk)
......
@@ -84,7 +84,11 @@ static inline void pmd_free (pmd_t *pmd)
 	free_pages((unsigned long) pmd, 2);
 }
-#define __pmd_free_tlb(tlb,pmd)		pmd_free(pmd)
+#define __pmd_free_tlb(tlb,pmd)			\
+	do {					\
+		tlb_flush_mmu(tlb, 0, 0);	\
+		pmd_free(pmd);			\
+	} while (0)
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
@@ -146,7 +150,7 @@ static inline void pte_free(struct page *pte)
 	__free_page(pte);
 }
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
 /*
  * This establishes kernel virtual mappings (e.g., as a result of a
......
@@ -29,6 +29,7 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
+#include <asm/bug.h>
 #include <asm/processor.h>
 #include <linux/threads.h>
@@ -465,7 +466,9 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_RO;
+	/* Do not clobber _PAGE_INVALID_NONE pages! */
+	if (!(pte_val(pte) & _PAGE_INVALID))
+		pte_val(pte) |= _PAGE_RO;
 	return pte;
 }
@@ -682,9 +685,9 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 	pte_t pte;
 	pte_val(pte) = (type << 1) | (offset << 12) | _PAGE_INVALID_SWAP;
 #ifndef __s390x__
-	pte_val(pte) &= 0x7ffff6fe; /* better to be paranoid */
+	BUG_ON((pte_val(pte) & 0x80000901) != 0);
 #else /* __s390x__ */
-	pte_val(pte) &= 0xfffffffffffff6fe; /* better to be paranoid */
+	BUG_ON((pte_val(pte) & 0x901) != 0);
 #endif /* __s390x__ */
 	return pte;
 }
......
@@ -7,6 +7,10 @@ struct scatterlist {
 	unsigned int length;
 };
-#define ISA_DMA_THRESHOLD (0xffffffffffffffff)
+#ifdef __s390x__
+#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL)
+#else
+#define ISA_DMA_THRESHOLD (0xffffffffUL)
+#endif
 #endif /* _ASMS390X_SCATTERLIST_H */
@@ -36,6 +36,7 @@ extern unsigned long machine_flags;
 #define MACHINE_HAS_MVPG	(machine_flags & 16)
 #define MACHINE_HAS_DIAG44	(machine_flags & 32)
 #define MACHINE_NEW_STIDP	(machine_flags & 64)
+#define MACHINE_HAS_IDTE	(machine_flags & 128)
 #ifndef __s390x__
 #define MACHINE_HAS_IEEE	(machine_flags & 2)
@@ -14,6 +14,12 @@
 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
 #endif
+#ifdef CONFIG_ARCH_S390X
+#define SIGEV_PAD_SIZE	((SIGEV_MAX_SIZE/sizeof(int)) - 4)
+#else
+#define SIGEV_PAD_SIZE	((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+#endif
+
 #include <asm-generic/siginfo.h>
 /*
......
@@ -217,7 +217,7 @@ typedef struct {
 extern inline int _raw_write_trylock(rwlock_t *rw)
 {
-	unsigned int result, reg;
+	unsigned long result, reg;
 	__asm__ __volatile__(
 #ifndef __s390x__
......
@@ -99,17 +99,21 @@ static inline void global_flush_tlb(void)
 static inline void __flush_tlb_mm(struct mm_struct * mm)
 {
 	cpumask_t local_cpumask;
+
+	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
+		return;
+	if (MACHINE_HAS_IDTE) {
+		asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0"
+			      : : "a" (2048),
+			      "a" (__pa(mm->pgd)&PAGE_MASK) : "cc" );
+		return;
+	}
 	preempt_disable();
 	local_cpumask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) {
-		/* mm was active on more than one cpu. */
-		if (mm == current->active_mm &&
-		    atomic_read(&mm->mm_users) == 1)
-			/* this cpu is the only one using the mm. */
-			mm->cpu_vm_mask = local_cpumask;
-		global_flush_tlb();
-	} else
+	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
 		local_flush_tlb();
+	else
+		global_flush_tlb();
 	preempt_enable();
 }
@@ -136,8 +140,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb_mm(vma->vm_mm);
 }
-#define flush_tlb_kernel_range(start, end) \
-	__flush_tlb_mm(&init_mm)
+#define flush_tlb_kernel_range(start, end) global_flush_tlb()
 #endif
......
@@ -259,8 +259,9 @@
 /*
  * Number 263 is reserved for vserver
  */
+#define __NR_fadvise64_64	264
-#define NR_syscalls 264
+#define NR_syscalls 265
 /*
  * There are some system calls that are not present on 64 bit, some
@@ -322,6 +323,7 @@
 #undef  __NR_getdents64
 #undef  __NR_fcntl64
 #undef  __NR_sendfile64
+#undef  __NR_fadvise64_64
 #define __NR_select		142
 #define __NR_getrlimit		191	/* SuS compliant getrlimit */
......