Commit e9a088fa authored by Thomas Pugliese, committed by Greg Kroah-Hartman

wusbcore: clean up list locking in urb enqueue

wa_urb_enqueue_run locks and unlocks its list lock as it traverses the
list of queued transfers.  This was done to prevent deadlocking due to
acquiring locks in reverse order in different places.  The problem is that
releasing the lock during the list traversal could allow the dequeue
routine to corrupt the list while it is being iterated over.  This patch
moves all list entries to a temp list while holding the list lock, then
traverses the temp list with no lock held.
Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 467d296f
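
The idiom the patch switches to is worth spelling out: splice the whole shared list onto a private list while holding the lock, then walk the private list with no lock held. Below is a minimal sketch of that pattern using the kernel's list and spinlock helpers; the structure and function names (my_dev, my_item, handle_item, pending_lock, pending_list) are placeholders for illustration and are not part of wusbcore.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item {
	struct list_head list_node;	/* linked into my_dev.pending_list */
};

struct my_dev {
	spinlock_t pending_lock;	/* protects pending_list */
	struct list_head pending_list;	/* items queued for later processing */
};

static void handle_item(struct my_item *item);	/* placeholder for per-item work */

static void process_pending(struct my_dev *dev)
{
	struct my_item *item, *next;
	LIST_HEAD(tmp_list);

	/*
	 * Detach every queued entry in one O(1) step while the lock is
	 * held; a concurrent dequeue path can no longer see these nodes.
	 */
	spin_lock_irq(&dev->pending_lock);
	list_cut_position(&tmp_list, &dev->pending_list,
			  dev->pending_list.prev);
	spin_unlock_irq(&dev->pending_lock);

	/*
	 * tmp_list is private to this thread, so it can be walked without
	 * the list lock; handle_item() is free to take other locks.
	 */
	list_for_each_entry_safe(item, next, &tmp_list, list_node) {
		list_del_init(&item->list_node);
		handle_item(item);
	}
}
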
@@ -928,7 +928,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
 	spin_lock_irqsave(&rpipe->seg_lock, flags);
 	while (atomic_read(&rpipe->segs_available) > 0
 	      && !list_empty(&rpipe->seg_list)) {
-		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
 				 list_node);
 		list_del(&seg->list_node);
 		xfer = seg->xfer;
@@ -1093,30 +1093,35 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
  *
  * We need to be careful here, as dequeue() could be called in the
  * middle.  That's why we do the whole thing under the
- * wa->xfer_list_lock.  If dequeue() jumps in, it first locks urb->lock
+ * wa->xfer_list_lock.  If dequeue() jumps in, it first locks xfer->lock
  * and then checks the list -- so as we would be acquiring in inverse
- * order, we just drop the lock once we have the xfer and reacquire it
- * later.
+ * order, we move the delayed list to a separate list while locked and then
+ * submit them without the list lock held.
  */
 void wa_urb_enqueue_run(struct work_struct *ws)
 {
 	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
 	struct wa_xfer *xfer, *next;
 	struct urb *urb;
+	LIST_HEAD(tmp_list);
 
+	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
 	spin_lock_irq(&wa->xfer_list_lock);
-	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
-				 list_node) {
+	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
+			wa->xfer_delayed_list.prev);
+	spin_unlock_irq(&wa->xfer_list_lock);
+
+	/*
+	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
+	 * can take xfer->lock as well as lock mutexes.
+	 */
+	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
 		list_del_init(&xfer->list_node);
-		spin_unlock_irq(&wa->xfer_list_lock);
 
 		urb = xfer->urb;
 		wa_urb_enqueue_b(xfer);
 		usb_put_urb(urb);	/* taken when queuing */
-
-		spin_lock_irq(&wa->xfer_list_lock);
 	}
-	spin_unlock_irq(&wa->xfer_list_lock);
 }
 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
......
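
Two list.h helpers carry the change: list_first_entry(head, type, member) is simply a clearer spelling of list_entry(head->next, type, member), and list_cut_position(&tmp_list, head, head->prev) detaches every entry from head onto tmp_list in constant time (doing nothing if head is empty). Since tmp_list then lives only on the worker's stack, wa_urb_enqueue_b() can take xfer->lock or block on mutexes without reopening the race with dequeue() that the old drop-and-reacquire loop allowed.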