Commit 764478f4 authored by Johan Hovold, committed by Greg Kroah-Hartman

USB: cdc-acm: fix unthrottle races

Fix two long-standing bugs which could potentially lead to memory
corruption or leave the port throttled until it is reopened (on weakly
ordered systems), respectively, when read-URB completion races with
unthrottle().

First, the URB must not be marked as free before processing is complete
to prevent it from being submitted by unthrottle() on another CPU.

	CPU 1				CPU 2
	================		================
	complete()			unthrottle()
	  process_urb();
	  smp_mb__before_atomic();
	  set_bit(i, free);		  if (test_and_clear_bit(i, free))
						  submit_urb();

Second, the URB must be marked as free before checking the throttled
flag to prevent unthrottle() on another CPU from failing to observe that
the URB needs to be submitted if complete() sees that the throttled flag
is set.

	CPU 1				CPU 2
	================		================
	complete()			unthrottle()
	  set_bit(i, free);		  throttled = 0;
	  smp_mb__after_atomic();	  smp_mb();
	  if (throttled)		  if (test_and_clear_bit(i, free))
		  return;			  submit_urb();

Note that test_and_clear_bit() only implies barriers when the test is
successful. To handle the case where the URB is still in use an explicit
barrier needs to be added to unthrottle() for the second race condition.

Also note that the first race was fixed by 36e59e0d ("cdc-acm: fix
race between callback and unthrottle") back in 2015, but the bug was
reintroduced a year later.

Fixes: 1aba579f ("cdc-acm: handle read pipe errors")
Fixes: 088c64f8 ("USB: cdc-acm: re-write read processing")
Signed-off-by: Johan Hovold <johan@kernel.org>
Acked-by: Oliver Neukum <oneukum@suse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c2d18126
...@@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb) ...@@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb)
struct acm *acm = rb->instance; struct acm *acm = rb->instance;
unsigned long flags; unsigned long flags;
int status = urb->status; int status = urb->status;
bool stopped = false;
bool stalled = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status); rb->index, urb->actual_length, status);
set_bit(rb->index, &acm->read_urbs_free);
if (!acm->dev) { if (!acm->dev) {
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return; return;
...@@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb) ...@@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb)
break; break;
case -EPIPE: case -EPIPE:
set_bit(EVENT_RX_STALL, &acm->flags); set_bit(EVENT_RX_STALL, &acm->flags);
schedule_work(&acm->work); stalled = true;
return; break;
case -ENOENT: case -ENOENT:
case -ECONNRESET: case -ECONNRESET:
case -ESHUTDOWN: case -ESHUTDOWN:
dev_dbg(&acm->data->dev, dev_dbg(&acm->data->dev,
"%s - urb shutting down with status: %d\n", "%s - urb shutting down with status: %d\n",
__func__, status); __func__, status);
return; stopped = true;
break;
default: default:
dev_dbg(&acm->data->dev, dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n", "%s - nonzero urb status received: %d\n",
...@@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb) ...@@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb)
} }
/* /*
* Unthrottle may run on another CPU which needs to see events * Make sure URB processing is done before marking as free to avoid
* in the same order. Submission has an implict barrier * racing with unthrottle() on another CPU. Matches the barriers
* implied by the test_and_clear_bit() in acm_submit_read_urb().
*/ */
smp_mb__before_atomic(); smp_mb__before_atomic();
set_bit(rb->index, &acm->read_urbs_free);
/*
* Make sure URB is marked as free before checking the throttled flag
* to avoid racing with unthrottle() on another CPU. Matches the
* smp_mb() in unthrottle().
*/
smp_mb__after_atomic();
if (stopped || stalled) {
if (stalled)
schedule_work(&acm->work);
return;
}
/* throttle device if requested by tty */ /* throttle device if requested by tty */
spin_lock_irqsave(&acm->read_lock, flags); spin_lock_irqsave(&acm->read_lock, flags);
...@@ -842,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty) ...@@ -842,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
acm->throttle_req = 0; acm->throttle_req = 0;
spin_unlock_irq(&acm->read_lock); spin_unlock_irq(&acm->read_lock);
/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
smp_mb();
if (was_throttled) if (was_throttled)
acm_submit_read_urbs(acm, GFP_KERNEL); acm_submit_read_urbs(acm, GFP_KERNEL);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment