Commit 5eebb6f2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] might_sleep() improvements

From: Mitchell Blank Jr <mitch@sfgoth.com>

This patch makes the following improvements to might_sleep():

 o Add a "might_sleep_if()" macro for when we might sleep only if some
   condition is met.  It's a bit tidier, and has an unlikely() in it.

 o Add might_sleep checks to skb_share_check() and skb_unshare() which
   sometimes need to allocate memory.

 o Make all architectures call might_sleep() in both down() and
   down_interruptible().  Before only ppc, ppc64, and i386 did this check.
   (sh did the check on down() but not down_interruptible())
parent 55308a20
...@@ -110,6 +110,7 @@ static void __down(struct semaphore * sem) ...@@ -110,6 +110,7 @@ static void __down(struct semaphore * sem)
void down(struct semaphore *sem) void down(struct semaphore *sem)
{ {
might_sleep();
/* This atomically does: /* This atomically does:
* old_val = sem->count; * old_val = sem->count;
* new_val = sem->count - 1; * new_val = sem->count - 1;
...@@ -219,6 +220,7 @@ int down_interruptible(struct semaphore *sem) ...@@ -219,6 +220,7 @@ int down_interruptible(struct semaphore *sem)
{ {
int ret = 0; int ret = 0;
might_sleep();
/* This atomically does: /* This atomically does:
* old_val = sem->count; * old_val = sem->count;
* new_val = sem->count - 1; * new_val = sem->count - 1;
......
...@@ -88,14 +88,18 @@ extern void __up_wakeup(struct semaphore *); ...@@ -88,14 +88,18 @@ extern void __up_wakeup(struct semaphore *);
static inline void __down(struct semaphore *sem) static inline void __down(struct semaphore *sem)
{ {
long count = atomic_dec_return(&sem->count); long count;
might_sleep();
count = atomic_dec_return(&sem->count);
if (unlikely(count < 0)) if (unlikely(count < 0))
__down_failed(sem); __down_failed(sem);
} }
static inline int __down_interruptible(struct semaphore *sem) static inline int __down_interruptible(struct semaphore *sem)
{ {
long count = atomic_dec_return(&sem->count); long count;
might_sleep();
count = atomic_dec_return(&sem->count);
if (unlikely(count < 0)) if (unlikely(count < 0))
return __down_failed_interruptible(sem); return __down_failed_interruptible(sem);
return 0; return 0;
......
...@@ -88,7 +88,7 @@ static inline void down(struct semaphore * sem) ...@@ -88,7 +88,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__down_op(sem, __down_failed); __down_op(sem, __down_failed);
} }
...@@ -101,7 +101,7 @@ static inline int down_interruptible (struct semaphore * sem) ...@@ -101,7 +101,7 @@ static inline int down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
return __down_op_ret(sem, __down_interruptible_failed); return __down_op_ret(sem, __down_interruptible_failed);
} }
......
...@@ -84,7 +84,7 @@ static inline void down(struct semaphore * sem) ...@@ -84,7 +84,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__down_op(sem, __down_failed); __down_op(sem, __down_failed);
} }
...@@ -97,7 +97,7 @@ static inline int down_interruptible (struct semaphore * sem) ...@@ -97,7 +97,7 @@ static inline int down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
return __down_op_ret(sem, __down_interruptible_failed); return __down_op_ret(sem, __down_interruptible_failed);
} }
......
...@@ -79,6 +79,7 @@ extern inline void down(struct semaphore * sem) ...@@ -79,6 +79,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */ /* atomically decrement the semaphores count, and if its negative, we wait */
local_save_flags(flags); local_save_flags(flags);
...@@ -104,6 +105,7 @@ extern inline int down_interruptible(struct semaphore * sem) ...@@ -104,6 +105,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */ /* atomically decrement the semaphores count, and if its negative, we wait */
local_save_flags(flags); local_save_flags(flags);
......
...@@ -90,6 +90,7 @@ static inline void down(struct semaphore * sem) ...@@ -90,6 +90,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
count = &(sem->count); count = &(sem->count);
__asm__ __volatile__( __asm__ __volatile__(
...@@ -117,6 +118,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -117,6 +118,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
count = &(sem->count); count = &(sem->count);
__asm__ __volatile__( __asm__ __volatile__(
......
...@@ -73,6 +73,7 @@ down (struct semaphore *sem) ...@@ -73,6 +73,7 @@ down (struct semaphore *sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
__down(sem); __down(sem);
} }
...@@ -89,6 +90,7 @@ down_interruptible (struct semaphore * sem) ...@@ -89,6 +90,7 @@ down_interruptible (struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem); ret = __down_interruptible(sem);
return ret; return ret;
......
...@@ -89,7 +89,7 @@ extern inline void down(struct semaphore * sem) ...@@ -89,7 +89,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"| atomic down operation\n\t" "| atomic down operation\n\t"
"subql #1,%0@\n\t" "subql #1,%0@\n\t"
...@@ -112,7 +112,7 @@ extern inline int down_interruptible(struct semaphore * sem) ...@@ -112,7 +112,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"| atomic interruptible down operation\n\t" "| atomic interruptible down operation\n\t"
"subql #1,%1@\n\t" "subql #1,%1@\n\t"
......
...@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem) ...@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"| atomic down operation\n\t" "| atomic down operation\n\t"
"movel %0, %%a1\n\t" "movel %0, %%a1\n\t"
...@@ -108,7 +108,7 @@ extern inline int down_interruptible(struct semaphore * sem) ...@@ -108,7 +108,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"| atomic down operation\n\t" "| atomic down operation\n\t"
"movel %1, %%a1\n\t" "movel %1, %%a1\n\t"
......
...@@ -88,6 +88,7 @@ static inline void down(struct semaphore * sem) ...@@ -88,6 +88,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
__down(sem); __down(sem);
} }
...@@ -103,6 +104,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -103,6 +104,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem); ret = __down_interruptible(sem);
return ret; return ret;
......
...@@ -84,7 +84,7 @@ extern __inline__ void down(struct semaphore * sem) ...@@ -84,7 +84,7 @@ extern __inline__ void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
spin_lock_irq(&sem->sentry); spin_lock_irq(&sem->sentry);
if (sem->count > 0) { if (sem->count > 0) {
sem->count--; sem->count--;
...@@ -100,7 +100,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem) ...@@ -100,7 +100,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
spin_lock_irq(&sem->sentry); spin_lock_irq(&sem->sentry);
if (sem->count > 0) { if (sem->count > 0) {
sem->count--; sem->count--;
......
...@@ -60,6 +60,7 @@ asmlinkage void __up(struct semaphore * sem); ...@@ -60,6 +60,7 @@ asmlinkage void __up(struct semaphore * sem);
static inline void down(struct semaphore * sem) static inline void down(struct semaphore * sem)
{ {
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
__down(sem); __down(sem);
} }
...@@ -68,6 +69,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -68,6 +69,7 @@ static inline int down_interruptible(struct semaphore * sem)
{ {
int ret = 0; int ret = 0;
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem); ret = __down_interruptible(sem);
return ret; return ret;
......
...@@ -107,6 +107,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -107,6 +107,7 @@ static inline int down_interruptible(struct semaphore * sem)
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0) if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem); ret = __down_interruptible(sem);
return ret; return ret;
......
...@@ -71,6 +71,7 @@ static inline void down(struct semaphore * sem) ...@@ -71,6 +71,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
ptr = &(sem->count.counter); ptr = &(sem->count.counter);
increment = 1; increment = 1;
...@@ -107,6 +108,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -107,6 +108,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
ptr = &(sem->count.counter); ptr = &(sem->count.counter);
increment = 1; increment = 1;
......
...@@ -57,6 +57,7 @@ extern void __up (struct semaphore * sem); ...@@ -57,6 +57,7 @@ extern void __up (struct semaphore * sem);
extern inline void down (struct semaphore * sem) extern inline void down (struct semaphore * sem)
{ {
might_sleep();
if (atomic_dec_return (&sem->count) < 0) if (atomic_dec_return (&sem->count) < 0)
__down (sem); __down (sem);
} }
...@@ -64,6 +65,7 @@ extern inline void down (struct semaphore * sem) ...@@ -64,6 +65,7 @@ extern inline void down (struct semaphore * sem)
extern inline int down_interruptible (struct semaphore * sem) extern inline int down_interruptible (struct semaphore * sem)
{ {
int ret = 0; int ret = 0;
might_sleep();
if (atomic_dec_return (&sem->count) < 0) if (atomic_dec_return (&sem->count) < 0)
ret = __down_interruptible (sem); ret = __down_interruptible (sem);
return ret; return ret;
......
...@@ -118,6 +118,7 @@ static inline void down(struct semaphore * sem) ...@@ -118,6 +118,7 @@ static inline void down(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"# atomic down operation\n\t" "# atomic down operation\n\t"
...@@ -144,6 +145,7 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -144,6 +145,7 @@ static inline int down_interruptible(struct semaphore * sem)
#if WAITQUEUE_DEBUG #if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); CHECK_MAGIC(sem->__magic);
#endif #endif
might_sleep();
__asm__ __volatile__( __asm__ __volatile__(
"# atomic interruptible down operation\n\t" "# atomic interruptible down operation\n\t"
......
...@@ -52,8 +52,10 @@ struct completion; ...@@ -52,8 +52,10 @@ struct completion;
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
/* Warn (with file/line) when called from a context that must not sleep. */
#define might_sleep() __might_sleep(__FILE__, __LINE__)
/* Conditional form: check only when @cond is true,
 * e.g. might_sleep_if(gfp_mask & __GFP_WAIT). */
#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
#else
#define might_sleep() do {} while(0)
#define might_sleep_if(cond) do {} while (0)
#endif
extern struct notifier_block *panic_notifier_list; extern struct notifier_block *panic_notifier_list;
......
...@@ -389,6 +389,7 @@ static inline int skb_shared(struct sk_buff *skb) ...@@ -389,6 +389,7 @@ static inline int skb_shared(struct sk_buff *skb)
*/ */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{ {
might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) { if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri); struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb); kfree_skb(skb);
...@@ -419,6 +420,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) ...@@ -419,6 +420,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
*/ */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{ {
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) { if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri); struct sk_buff *nskb = skb_copy(skb, pri);
kfree_skb(skb); /* Free our shared copy */ kfree_skb(skb); /* Free our shared copy */
......
...@@ -543,8 +543,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order, ...@@ -543,8 +543,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
int do_retry; int do_retry;
struct reclaim_state reclaim_state; struct reclaim_state reclaim_state;
if (wait) might_sleep_if(wait);
might_sleep();
cold = 0; cold = 0;
if (gfp_mask & __GFP_COLD) if (gfp_mask & __GFP_COLD)
......
...@@ -503,8 +503,7 @@ struct pte_chain *pte_chain_alloc(int gfp_flags) ...@@ -503,8 +503,7 @@ struct pte_chain *pte_chain_alloc(int gfp_flags)
struct pte_chain *ret; struct pte_chain *ret;
struct pte_chain **pte_chainp; struct pte_chain **pte_chainp;
if (gfp_flags & __GFP_WAIT) might_sleep_if(gfp_flags & __GFP_WAIT);
might_sleep();
pte_chainp = &get_cpu_var(local_pte_chain); pte_chainp = &get_cpu_var(local_pte_chain);
if (*pte_chainp) { if (*pte_chainp) {
......
...@@ -1814,8 +1814,7 @@ static void* cache_alloc_refill(kmem_cache_t* cachep, int flags) ...@@ -1814,8 +1814,7 @@ static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
static inline void static inline void
cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags) cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
{ {
if (flags & __GFP_WAIT) might_sleep_if(flags & __GFP_WAIT);
might_sleep();
#if DEBUG #if DEBUG
kmem_flagcheck(cachep, flags); kmem_flagcheck(cachep, flags);
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment