Commit 2a6da97f authored by Thomas Pugliese, committed by Greg Kroah-Hartman

usb: wusbcore: fix potential double list_del on urb dequeue

This patch locks rpipe->seg_lock around the entire transfer segment
cleanup loop in wa_urb_dequeue instead of just one case of the switch
statement.  This fixes a race between __wa_xfer_delayed_run and
wa_urb_dequeue where a transfer segment in the WA_SEG_DELAYED state
could be removed from the rpipe seg_list twice, leading to memory
corruption.  It also switches the spin_lock call to the non-irqsave
variant, since xfer->lock is already held and interrupts are already
disabled.
Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3c1b2c3e
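
The locking pattern the change relies on can be sketched outside the driver. The snippet below is a minimal, hypothetical illustration, not code from this commit: the names outer_lock, inner_lock, seg, and cleanup_segments are made up for the example. It shows why the plain spin_lock/spin_unlock pair is sufficient for the inner lock once the outer spin_lock_irqsave has already disabled local interrupts, and why holding the inner lock across the whole list walk closes the double-unlink window.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical locks and list standing in for xfer->lock and rpipe->seg_lock. */
static DEFINE_SPINLOCK(outer_lock);	/* taken with irqsave, like xfer->lock */
static DEFINE_SPINLOCK(inner_lock);	/* protects the list, like rpipe->seg_lock */
static LIST_HEAD(seg_list);

struct seg {
	struct list_head list_node;
};

static void cleanup_segments(void)
{
	unsigned long flags;
	struct seg *s, *tmp;

	spin_lock_irqsave(&outer_lock, flags);	/* IRQs now disabled on this CPU */

	/*
	 * Hold the inner lock across the whole walk so a concurrent path
	 * cannot unlink the same node a second time (the double list_del
	 * this commit fixes).  The non-irqsave variant is enough here
	 * because IRQs are already off under outer_lock.
	 */
	spin_lock(&inner_lock);
	list_for_each_entry_safe(s, tmp, &seg_list, list_node)
		list_del(&s->list_node);
	spin_unlock(&inner_lock);

	spin_unlock_irqrestore(&outer_lock, flags);
}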
@@ -1974,6 +1974,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 		goto out_unlock;	/* setup(), enqueue_b() completes */
 	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
 	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+	/*
+	 * grab the rpipe->seg_lock here to prevent racing with
+	 * __wa_xfer_delayed_run.
+	 */
+	spin_lock(&rpipe->seg_lock);
 	for (cnt = 0; cnt < xfer->segs; cnt++) {
 		seg = xfer->seg[cnt];
 		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
@@ -1994,10 +1999,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 			 */
 			seg->status = WA_SEG_ABORTED;
 			seg->result = -ENOENT;
-			spin_lock_irqsave(&rpipe->seg_lock, flags2);
 			list_del(&seg->list_node);
 			xfer->segs_done++;
-			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
 			break;
 		case WA_SEG_DONE:
 		case WA_SEG_ERROR:
@@ -2026,6 +2029,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 			break;
 		}
 	}
+	spin_unlock(&rpipe->seg_lock);
 	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
 	done = __wa_xfer_is_done(xfer);
 	spin_unlock_irqrestore(&xfer->lock, flags);