Commit a866374a authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Linus Torvalds

[PATCH] mm: pagefault_{disable,enable}()

Introduce pagefault_{disable,enable}() and use these where previously we did
manual preempt increments/decrements to make the pagefault handler do the
atomic thing.

Currently they still rely on the increased preempt count, but do not rely on
the disabled preemption, this might go away in the future.

(NOTE: the extra barrier() in pagefault_disable might fix some holes on
       machines which have too many registers for their own good)

[heiko.carstens@de.ibm.com: s390 fix]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6edaf68a
...@@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
break; break;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr; unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -52,8 +52,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -52,8 +52,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) { if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
return; return;
} }
...@@ -68,8 +67,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -68,8 +67,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
*/ */
kpte_clear_flush(kmap_pte-idx, vaddr); kpte_clear_flush(kmap_pte-idx, vaddr);
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
} }
/* This is the same as kmap_atomic() but can map memory that doesn't /* This is the same as kmap_atomic() but can map memory that doesn't
...@@ -80,7 +78,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) ...@@ -80,7 +78,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx; enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
inc_preempt_count(); pagefault_disable();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
......
...@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type) ...@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr; unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
return; return;
} }
...@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
local_flush_tlb_one(vaddr); local_flush_tlb_one(vaddr);
#endif #endif
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
} }
#ifndef CONFIG_LIMITED_DMA #ifndef CONFIG_LIMITED_DMA
...@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) ...@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx; enum fixed_addresses idx;
unsigned long vaddr; unsigned long vaddr;
inc_preempt_count(); pagefault_disable();
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/uaccess.h> #include <linux/uaccess.h>
#include <asm/futex.h> #include <asm/futex.h>
#ifndef __s390x__ #ifndef __s390x__
...@@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) ...@@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
{ {
int oldval = 0, newval, ret; int oldval = 0, newval, ret;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) ...@@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
default: default:
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
*old = oldval; *old = oldval;
return ret; return ret;
} }
......
...@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr; unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned long idx = type + KM_TYPE_NR*smp_processor_id(); unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < FIXADDR_START) { // FIXME if (vaddr < FIXADDR_START) { // FIXME
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
return; return;
} }
...@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
#endif #endif
#endif #endif
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
} }
/* We may be fed a pagetable here by ptep_to_xxx and others. */ /* We may be fed a pagetable here by ptep_to_xxx and others. */
......
...@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) ...@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
{ {
unsigned long paddr; unsigned long paddr;
inc_preempt_count(); pagefault_disable();
paddr = page_to_phys(page); paddr = page_to_phys(page);
switch (type) { switch (type) {
...@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
default: default:
BUG(); BUG();
} }
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
} }
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
if (op == FUTEX_OP_SET) if (op == FUTEX_OP_SET)
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
...@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) ...@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr; unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count(); pagefault_disable();
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
...@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR*smp_processor_id(); unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < KMAP_FIX_BEGIN) { // FIXME if (vaddr < KMAP_FIX_BEGIN) { // FIXME
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
return; return;
} }
...@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) ...@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
pte_clear(&init_mm, vaddr, kmap_pte+idx); pte_clear(&init_mm, vaddr, kmap_pte+idx);
flush_tlb_page(NULL, vaddr); flush_tlb_page(NULL, vaddr);
#endif #endif
dec_preempt_count(); pagefault_enable();
preempt_check_resched();
} }
static inline struct page *kmap_atomic_to_page(void *ptr) static inline struct page *kmap_atomic_to_page(void *ptr)
......
...@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
...@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT; return -EFAULT;
inc_preempt_count(); pagefault_disable();
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
...@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS; ret = -ENOSYS;
} }
dec_preempt_count(); pagefault_enable();
if (!ret) { if (!ret) {
switch (cmp) { switch (cmp) {
......
#ifndef __LINUX_UACCESS_H__ #ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__
#include <linux/preempt.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
/*
* These routines enable/disable the pagefault handler in that
* it will not take any locks and go straight to the fixup table.
*
* They have great resemblance to the preempt_disable/enable calls
* and in fact they are identical; this is because currently there is
* no other way to make the pagefault handlers do this. So we do
* disable preemption but we don't necessarily care about that.
*/
/*
 * pagefault_disable() - make following user-space accesses atomic.
 *
 * After this call the page fault handler will not sleep or take any
 * locks; a faulting access goes straight to the exception fixup table
 * (see the comment block above: currently implemented by raising the
 * preempt count, so this also disables preemption).
 * Must be paired with pagefault_enable().
 */
static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * Compiler barrier: make sure the preempt-count store has been
	 * issued before any subsequent access that could fault, so the
	 * fault handler always observes the raised count.
	 */
	barrier();
}
/*
 * pagefault_enable() - re-enable normal (sleeping) pagefault handling.
 *
 * Undoes pagefault_disable(). Since the current implementation piggy-
 * backs on the preempt count, this also re-enables preemption and
 * checks for a pending reschedule.
 */
static inline void pagefault_enable(void)
{
	/*
	 * Compiler barrier: make sure all loads/stores performed in the
	 * pagefault-disabled region are issued before the fault handler
	 * is switched back to the sleeping variant.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * Barrier again so the count decrement is not reordered past the
	 * reschedule check below.
	 */
	barrier();
	preempt_check_resched();
}
#ifndef ARCH_HAS_NOCACHE_UACCESS #ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to, static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
...@@ -35,9 +70,9 @@ static inline unsigned long __copy_from_user_nocache(void *to, ...@@ -35,9 +70,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
({ \ ({ \
long ret; \ long ret; \
\ \
inc_preempt_count(); \ pagefault_disable(); \
ret = __get_user(retval, addr); \ ret = __get_user(retval, addr); \
dec_preempt_count(); \ pagefault_enable(); \
ret; \ ret; \
}) })
......
...@@ -282,9 +282,9 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from) ...@@ -282,9 +282,9 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
{ {
int ret; int ret;
inc_preempt_count(); pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
dec_preempt_count(); pagefault_enable();
return ret ? -EFAULT : 0; return ret ? -EFAULT : 0;
} }
...@@ -585,9 +585,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) ...@@ -585,9 +585,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!(uval & FUTEX_OWNER_DIED)) { if (!(uval & FUTEX_OWNER_DIED)) {
newval = FUTEX_WAITERS | new_owner->pid; newval = FUTEX_WAITERS | new_owner->pid;
inc_preempt_count(); pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
dec_preempt_count(); pagefault_enable();
if (curval == -EFAULT) if (curval == -EFAULT)
return -EFAULT; return -EFAULT;
if (curval != uval) if (curval != uval)
...@@ -618,9 +618,9 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) ...@@ -618,9 +618,9 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
* There is no waiter, so we unlock the futex. The owner died * There is no waiter, so we unlock the futex. The owner died
* bit has not to be preserved here. We are the owner: * bit has not to be preserved here. We are the owner:
*/ */
inc_preempt_count(); pagefault_disable();
oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0); oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
dec_preempt_count(); pagefault_enable();
if (oldval == -EFAULT) if (oldval == -EFAULT)
return oldval; return oldval;
...@@ -1158,9 +1158,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, ...@@ -1158,9 +1158,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
*/ */
newval = current->pid; newval = current->pid;
inc_preempt_count(); pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval); curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
dec_preempt_count(); pagefault_enable();
if (unlikely(curval == -EFAULT)) if (unlikely(curval == -EFAULT))
goto uaddr_faulted; goto uaddr_faulted;
...@@ -1183,9 +1183,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, ...@@ -1183,9 +1183,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
uval = curval; uval = curval;
newval = uval | FUTEX_WAITERS; newval = uval | FUTEX_WAITERS;
inc_preempt_count(); pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
dec_preempt_count(); pagefault_enable();
if (unlikely(curval == -EFAULT)) if (unlikely(curval == -EFAULT))
goto uaddr_faulted; goto uaddr_faulted;
...@@ -1215,10 +1215,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, ...@@ -1215,10 +1215,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
newval = current->pid | newval = current->pid |
FUTEX_OWNER_DIED | FUTEX_WAITERS; FUTEX_OWNER_DIED | FUTEX_WAITERS;
inc_preempt_count(); pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, curval = futex_atomic_cmpxchg_inatomic(uaddr,
uval, newval); uval, newval);
dec_preempt_count(); pagefault_enable();
if (unlikely(curval == -EFAULT)) if (unlikely(curval == -EFAULT))
goto uaddr_faulted; goto uaddr_faulted;
...@@ -1390,9 +1390,9 @@ static int futex_unlock_pi(u32 __user *uaddr) ...@@ -1390,9 +1390,9 @@ static int futex_unlock_pi(u32 __user *uaddr)
* anyone else up: * anyone else up:
*/ */
if (!(uval & FUTEX_OWNER_DIED)) { if (!(uval & FUTEX_OWNER_DIED)) {
inc_preempt_count(); pagefault_disable();
uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
dec_preempt_count(); pagefault_enable();
} }
if (unlikely(uval == -EFAULT)) if (unlikely(uval == -EFAULT))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment