Commit a2c72fae authored by Linus Torvalds

Fix subtle bug in "finish_wait()", which can cause kernel stack
corruption on SMP because of another CPU still accessing a waitqueue
even after it was de-allocated.

Use a careful version of the list emptiness check to make sure we
don't de-allocate the stack frame before the waitqueue is all done.
parent 8cc86c08
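
For orientation (a typical caller pattern, not part of this commit; "q" and
"condition" are placeholders): the wait entry handed to finish_wait() lives
on the caller's stack, which is why a late write to it from another CPU
corrupts whatever reuses that stack frame:

	/* Sketch of the usual wait-loop pattern around finish_wait(). */
	DEFINE_WAIT(wait);		/* wait entry is on this stack frame */

	for (;;) {
		prepare_to_wait(&q, &wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	finish_wait(&q, &wait);		/* the frame may be reused as soon as this returns */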
@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
 	return head->next == head;
 }
 
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
 static inline void __list_splice(struct list_head *list,
 				 struct list_head *head)
 {
...
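
For context (a trimmed sketch of the existing list primitives, not part of
this diff): the careful check works because list_del_init() detaches the
entry and then reinitializes it with two separate stores, so there is a
window in which a plain list_empty() on the entry is already true while
another CPU still has a store to entry->prev pending:

	static inline void __list_del(struct list_head *prev, struct list_head *next)
	{
		next->prev = prev;
		prev->next = next;
	}

	static inline void list_del_init(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);
		entry->next = entry;	/* list_empty(entry) turns true here... */
		entry->prev = entry;	/* ...but list_empty_careful(entry) only now */
	}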
@@ -159,7 +159,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	unsigned long flags;
 
 	__set_current_state(TASK_RUNNING);
-	if (!list_empty(&wait->task_list)) {
+	/*
+	 * We can check for list emptiness outside the lock
+	 * IFF:
+	 *  - we use the "careful" check that verifies both
+	 *    the next and prev pointers, so that there cannot
+	 *    be any half-pending updates in progress on other
+	 *    CPUs that we haven't seen yet (and that might
+	 *    still change the stack area).
+	 * and
+	 *  - all other users take the lock (ie we can only
+	 *    have _one_ other CPU that looks at or modifies
+	 *    the list).
+	 */
+	if (!list_empty_careful(&wait->task_list)) {
 		spin_lock_irqsave(&q->lock, flags);
 		list_del_init(&wait->task_list);
 		spin_unlock_irqrestore(&q->lock, flags);
...
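
The second condition in the comment (all other users take the lock) holds
because the one writer on the wake side runs under q->lock. A sketch of the
default autoremove wake function as it looked in this era, for context rather
than as part of the commit:

	/*
	 * Called by __wake_up() with q->lock held: this is the one other
	 * CPU that may touch wait->task_list, and its list_del_init() is
	 * exactly the half-pending update list_empty_careful() guards against.
	 */
	int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
	{
		int ret = default_wake_function(wait, mode, sync);

		if (ret)
			list_del_init(&wait->task_list);
		return ret;
	}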