Commit 6a20dbd6 authored by Manfred Schlaegl, committed by Greg Kroah-Hartman

tty: Fix race condition between __tty_buffer_request_room and flush_to_ldisc

The race was introduced during the development of linux-3.11 by commits
e8437d7e and
e9975fde.
It was originally found and reproduced on linux-3.12.15 and
linux-3.12.15-rt25 by sending 500-byte blocks at 115 kbaud to the
target UART in a loop with a 100 millisecond delay.
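
For illustration only, a minimal user-space sender producing this traffic
pattern could look like the sketch below; the device path /dev/ttyS0 and
the lack of error handling are assumptions, not part of the original report:
{{{
/* Hypothetical reproducer sketch: write 500-byte blocks at 115200 baud
 * in a loop with a 100 ms delay between blocks. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	char block[500];
	struct termios tio;
	int fd = open("/dev/ttyS0", O_WRONLY | O_NOCTTY);	/* assumed device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(block, 'x', sizeof(block));

	tcgetattr(fd, &tio);
	cfmakeraw(&tio);
	cfsetospeed(&tio, B115200);
	cfsetispeed(&tio, B115200);
	tcsetattr(fd, TCSANOW, &tio);

	for (;;) {
		if (write(fd, block, sizeof(block)) < 0) {
			perror("write");
			break;
		}
		usleep(100 * 1000);	/* 100 milliseconds between blocks */
	}
	close(fd);
	return 0;
}
}}}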

In short:
 1. The consumer flush_to_ldisc is about to remove the head tty_buffer.
 2. The producer adds a number of bytes, so that a new tty_buffer must
	be allocated and added by __tty_buffer_request_room.
 3. The consumer removes the head tty_buffer element without handling
	the newly committed data.
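
For reference, the fields used in the example below live in struct
tty_buffer; an abridged sketch of the structure as it looks in this
kernel series (see include/linux/tty.h):
{{{
struct tty_buffer {
	struct tty_buffer *next;	/* next buffer in the chain, NULL if last */
	int used;			/* bytes filled in by the producer */
	int size;			/* capacity of the data area */
	int commit;			/* bytes committed, i.e. visible to the consumer */
	int read;			/* bytes already consumed by flush_to_ldisc */
	/* flags and the data area follow */
};
}}}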

Detailed example:
 * Initial buffer:
   * Head, Tail -> 0: used=250; commit=250; read=240; next=NULL
 * Consumer: ''flush_to_ldisc''
   * consumed 10 bytes
   * buffer:
     * Head, Tail -> 0: used=250; commit=250; read=250; next=NULL
{{{
		count = head->commit - head->read;	// count = 0
		if (!count) {				// enter
			// INTERRUPTED BY PRODUCER ->
			if (head->next == NULL)
				break;
			buf->head = head->next;
			tty_buffer_free(port, head);
			continue;
		}
}}}
 * Producer: tty_insert_flip_... 10 bytes + tty_flip_buffer_push
   * buffer:
     * Head, Tail -> 0: used=250; commit=250; read=250; next=NULL
   * added 6 bytes: head element filled to its maximum.
     * buffer:
       * Head, Tail -> 0: used=256; commit=250; read=250; next=NULL
   * added 4 bytes: __tty_buffer_request_room is called (see the
	slow-path sketch after this example)
     * buffer:
       * Head -> 0: used=256; commit=256; read=250; next=1
       * Tail -> 1: used=4; commit=0; read=0; next=NULL
   * push (tty_flip_buffer_push)
     * buffer:
       * Head -> 0: used=256; commit=256; read=250; next=1
       * Tail -> 1: used=4; commit=4; read=0; next=NULL
 * Consumer
{{{
		count = head->commit - head->read;
		if (!count) {
			// INTERRUPTED BY PRODUCER <-
			if (head->next == NULL)		// -> no break
				break;
			buf->head = head->next;
			tty_buffer_free(port, head);
			// ERROR: tty_buffer head freed -> 6 bytes lost
			continue;
		}
}}}
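
The producer slow path exercised in the "added 4 bytes" step above is,
before this patch, roughly the following (abridged from
__tty_buffer_request_room as shown in the diff below):
{{{
		if ((n = tty_buffer_alloc(port, size)) != NULL) {
			n->flags = flags;
			buf->tail = n;
			/* commit jumps from 250 to 256 in the example ... */
			b->commit = b->used;
			smp_mb();
			/* ... and head->next becomes non-NULL; a consumer that
			 * sampled head->commit before this update can still
			 * observe head->next afterwards and free the buffer. */
			b->next = n;
		}
}}}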

This patch reintroduces a spin_lock to protect this case. Perhaps later
a lock-less solution could be found.
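
With the patch applied, the consumer's empty-buffer check in
flush_to_ldisc becomes (abridged from the diff below):
{{{
		spin_lock_irqsave(&buf->flush_lock, flags);
		count = head->commit - head->read;
		if (!count) {
			if (head->next == NULL) {
				spin_unlock_irqrestore(&buf->flush_lock, flags);
				break;
			}
			/* Because the producer updates commit and next under
			 * the same lock, a non-NULL next can no longer be seen
			 * together with a stale commit, so no committed data
			 * is lost when the head buffer is dropped here. */
			buf->head = head->next;
			spin_unlock_irqrestore(&buf->flush_lock, flags);
			tty_buffer_free(port, head);
			continue;
		}
		spin_unlock_irqrestore(&buf->flush_lock, flags);
}}}
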
Signed-off-by: Manfred Schlaegl <manfred.schlaegl@gmx.at>
Cc: stable <stable@vger.kernel.org> # 3.11
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a798c10f
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -255,11 +255,16 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
 	if (change || left < size) {
 		/* This is the slow path - looking for new buffers to use */
 		if ((n = tty_buffer_alloc(port, size)) != NULL) {
+			unsigned long iflags;
+
 			n->flags = flags;
 			buf->tail = n;
+
+			spin_lock_irqsave(&buf->flush_lock, iflags);
 			b->commit = b->used;
-			smp_mb();
 			b->next = n;
+			spin_unlock_irqrestore(&buf->flush_lock, iflags);
+
 		} else if (change)
 			size = 0;
 		else
@@ -443,6 +448,7 @@ static void flush_to_ldisc(struct work_struct *work)
 	mutex_lock(&buf->lock);
 
 	while (1) {
+		unsigned long flags;
 		struct tty_buffer *head = buf->head;
 		int count;
 
@@ -450,14 +456,19 @@ static void flush_to_ldisc(struct work_struct *work)
 		if (atomic_read(&buf->priority))
 			break;
 
+		spin_lock_irqsave(&buf->flush_lock, flags);
 		count = head->commit - head->read;
 		if (!count) {
-			if (head->next == NULL)
+			if (head->next == NULL) {
+				spin_unlock_irqrestore(&buf->flush_lock, flags);
 				break;
+			}
 			buf->head = head->next;
+			spin_unlock_irqrestore(&buf->flush_lock, flags);
 			tty_buffer_free(port, head);
 			continue;
 		}
+		spin_unlock_irqrestore(&buf->flush_lock, flags);
 
 		count = receive_buf(tty, head, count);
 		if (!count)
@@ -512,6 +523,7 @@ void tty_buffer_init(struct tty_port *port)
 	struct tty_bufhead *buf = &port->buf;
 
 	mutex_init(&buf->lock);
+	spin_lock_init(&buf->flush_lock);
 	tty_buffer_reset(&buf->sentinel, 0);
 	buf->head = &buf->sentinel;
 	buf->tail = &buf->sentinel;
diff --git a/include/linux/tty.h b/include/linux/tty.h
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -61,6 +61,7 @@ struct tty_bufhead {
 	struct tty_buffer *head;	/* Queue head */
 	struct work_struct work;
 	struct mutex	   lock;
+	spinlock_t	   flush_lock;
 	atomic_t	   priority;
 	struct tty_buffer sentinel;
 	struct llist_head free;		/* Free queue head */
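
For context, the producer referred to above is typically a serial
driver's receive path; a hypothetical RX handler (names and structure
are illustrative only, not taken from an existing driver) feeds the flip
buffer roughly like this:
{{{
#include <linux/tty_flip.h>

/* Hypothetical driver RX path: received characters are inserted into the
 * tty flip buffer and then pushed, which schedules flush_to_ldisc() -
 * the consumer side of the race described above. */
static void example_uart_rx(struct tty_port *port,
			    const unsigned char *data, size_t len)
{
	/* may take the __tty_buffer_request_room() slow path if the
	 * current tail buffer is full */
	tty_insert_flip_string(port, data, len);
	/* commits the inserted data and queues the flush work */
	tty_flip_buffer_push(port);
}
}}}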