Commit 06b2e988 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6:
  parisc: Call pagefault_disable/pagefault_enable in kmap_atomic/kunmap_atomic
  parisc: Remove unnecessary macros from entry.S
  parisc: LWS fixes for syscall.S
  parisc: Delete unnecessary nop's in entry.S
  parisc: Avoid interruption in critical region in entry.S
  parisc: invoke oom-killer from page fault
  parisc: clear floating point exception flag on SIGFPE signal
  parisc: Use of align_frame provides stack frame.
parents 35926ff5 210501aa
@@ -2,6 +2,7 @@
 #define _PARISC_CACHEFLUSH_H
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
 #define kunmap(page)			kunmap_parisc(page_address(page))
-#define kmap_atomic(page, idx)		page_address(page)
-#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+	pagefault_disable();
+	return page_address(page);
+}
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+	kunmap_parisc(addr);
+	pagefault_enable();
+}
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif
......
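The cacheflush.h hunk turns kmap_atomic()/kunmap_atomic() from plain macros into inline functions so that, as on other architectures, an atomic mapping disables page faults for its whole lifetime. Below is a minimal stand-alone sketch of that pairing discipline, not kernel code: every name is a hypothetical stand-in for the kernel primitive, and since parisc has no highmem the "mapping" is just the page's existing address.

/* Stand-alone sketch of the invariant the new inline functions establish:
 * every kmap_atomic() is bracketed by pagefault_disable()/pagefault_enable(),
 * so the calls must nest and the counts must balance. */
#include <assert.h>
#include <stdio.h>

static int pagefault_count;              /* models the per-task disable count */

static void pagefault_disable(void) { pagefault_count++; }
static void pagefault_enable(void)  { assert(pagefault_count > 0); pagefault_count--; }

static void *kmap_atomic(void *page)     /* page is already mapped on parisc */
{
	pagefault_disable();
	return page;
}

static void kunmap_atomic(void *addr)
{
	(void)addr;                       /* kunmap_parisc() would flush here */
	pagefault_enable();
}

int main(void)
{
	char page[16];
	char *p = kmap_atomic(page);      /* faults are now "atomic context" */
	p[0] = 1;
	kunmap_atomic(p);
	assert(pagefault_count == 0);     /* pairs balanced */
	printf("kmap_atomic pairing balanced\n");
	return 0;
}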
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
......
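align_frame(x, y) rounds x up to a multiple of y and then adds FRAME_SIZE, so TASK_SZ_ALGN, PT_SZ_ALGN and THREAD_SZ_ALGN all reserve room for one stack frame on top of the aligned structure. A quick user-space check of that arithmetic (the sizes fed in are arbitrary examples, not the real structure sizes):

/* Verifies: align_frame(x, 64) is a multiple of 64 and leaves at least
 * FRAME_SIZE bytes beyond x.  FRAME_SIZE 64 matches the 32-bit value. */
#include <assert.h>
#include <stdio.h>

#define FRAME_SIZE 64
#define FRAME_ALIGN 64
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

int main(void)
{
	unsigned int sizes[] = { 1, 64, 65, 100, 1000 };
	for (unsigned int i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
		unsigned int x = sizes[i];
		unsigned int a = align_frame(x, FRAME_ALIGN);
		assert(a % FRAME_ALIGN == 0);            /* aligned result   */
		assert(a >= x + FRAME_SIZE);             /* frame included   */
		assert(a < x + FRAME_SIZE + FRAME_ALIGN);/* minimal rounding */
		printf("align_frame(%u, %u) = %u\n", x, FRAME_ALIGN, a);
	}
	return 0;
}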
@@ -364,32 +364,6 @@
 	.align		32
 	.endm
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro		EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u		\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u		\reg1,\start,\length,\reg2
-#endif
-	.endm
-	.macro		DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd		\reg1,32+(\start),\length,\reg2
-#else
-	depw		\reg1,\start,\length,\reg2
-#endif
-	.endm
-	.macro		DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi		\val,32+(\start),\length,\reg
-#else
-	depwi		\val,\start,\length,\reg
-#endif
-	.endm
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	copy		%r0,\pte
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
-	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
+	dep		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
 	copy		\pmd,%r9
 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi		0,31,32,\tmp
 #endif
 	copy		\va,\tmp1
-	DEPI		0,31,23,\tmp1
+	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z		\prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31		   /* restore syscall rp */
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
 	rsm	PSW_SM_I, %r0
-	LDREG	TASK_PT_GR30(%r1),%r30		   /* restore user sp */
-	mfsp	%sr3,%r1			   /* Get users space id */
+	copy	%r1,%r30			   /* Restore user sp */
+	mfsp	%sr3,%r1			   /* Get user space id */
 	mtsp	%r1,%sr7			   /* Restore sr7 */
 	ssm	PSW_SM_I, %r0
......
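With the EXTR/DEP/DEPI wrappers removed, L2_ptep and its callers use the 32-bit extru/dep/depi forms directly. PA-RISC numbers bit positions from the most-significant end, which is why the position operand is written as 31-SHIFT; the following hedged C model of extru (illustrative constants, not the kernel's) shows that the instruction is just a shift-and-mask that yields a page-table index.

/* C model of the 32-bit "extru r,p,len,t": extract a len-bit field whose
 * rightmost bit sits at MSB-relative position p, zero-extended. */
#include <stdint.h>
#include <stdio.h>

static uint32_t extru(uint32_t r, unsigned pos, unsigned len)
{
	return (r >> (31 - pos)) & ((len < 32) ? ((1u << len) - 1) : 0xffffffffu);
}

int main(void)
{
	const unsigned PAGE_SHIFT = 12;       /* 4 KB pages */
	const unsigned BITS_PER_PTE = 10;     /* example value only */
	uint32_t va = 0x12345678;

	/* extru va,31-PAGE_SHIFT,BITS_PER_PTE,index */
	uint32_t index = extru(va, 31 - PAGE_SHIFT, BITS_PER_PTE);
	printf("pte index = 0x%x\n", index);  /* == (va >> 12) & 0x3ff */
	return 0;
}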
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
@@ -423,9 +422,6 @@ tracesys_sigexit:
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
 1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
 END(sys_call_table64)
 #endif
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 	.end
......
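The syscall.S changes gate into lws_start directly at the 0xb0 entry point and drop the CONFIG_SMP conditionals, so the lws_lock_start table and the locking around the compare-and-swap are always built. The contract of that CAS path, as described by the "prev = *addr" comment in the file, can be modelled in plain C; the sketch below is only an illustration of the semantics, with a pthread mutex standing in for the hash lock and the -EFAULT (bad user address) path omitted.

/* Hedged user-space model of the LWS compare-and-swap contract. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lws_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 and fills *prev, or -EAGAIN if the lock was contended. */
static int lws_cas32(int *addr, int old, int new_val, int *prev)
{
	if (pthread_mutex_trylock(&lws_lock) != 0)
		return -EAGAIN;			/* cas_wouldblock: spin in userspace */
	*prev = *addr;				/* prev = *addr;        */
	if (*prev == old)			/* if (prev == old)     */
		*addr = new_val;		/*     *addr = new;     */
	pthread_mutex_unlock(&lws_lock);	/* free lock            */
	return 0;
}

int main(void)					/* compile with -pthread */
{
	int v = 5, prev;

	lws_cas32(&v, 5, 7, &prev);	/* succeeds: prev == 5, v becomes 7 */
	printf("prev=%d v=%d\n", prev, v);
	lws_cas32(&v, 5, 9, &prev);	/* fails: prev == 7, v stays 7 */
	printf("prev=%d v=%d\n", prev, v);
	return 0;
}

The caller detects success by comparing prev with old; -EAGAIN only means the lock was contended and the operation should be retried.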
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
 		return SIGNALCODE(SIGFPE, FPE_FLTINV);
 	  case DIVISIONBYZEROEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
 		return SIGNALCODE(SIGFPE, FPE_FLTDIV);
 	  case INEXACTEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
......
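The decode_fpu() change clears the emulated FPU's exception bit for the divide-by-zero case before SIGFPE is delivered, so the flag does not remain set afterwards. The same sticky-flag behaviour can be seen from user space with C99 <fenv.h>; this is only an analogue, not the math-emu code path.

/* IEEE exception flags are sticky: a divide-by-zero flag stays set until
 * explicitly cleared.  Requires C99 <fenv.h>; link with -lm if needed. */
#include <fenv.h>
#include <stdio.h>

int main(void)
{
	volatile double zero = 0.0;
	volatile double x = 1.0 / zero;		/* sets FE_DIVBYZERO (no trap by default) */

	printf("x = %f, divbyzero set: %d\n", x,
	       fetestexcept(FE_DIVBYZERO) != 0);

	feclearexcept(FE_DIVBYZERO);		/* clear the sticky flag */
	printf("after clear, divbyzero set: %d\n",
	       fetestexcept(FE_DIVBYZERO) != 0);
	return 0;
}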
@@ -264,8 +264,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }