Commit dea9c80e authored by Bartosz Golaszewski's avatar Bartosz Golaszewski

gpiolib: rework the locking mechanism for lineevent kfifo

The read_lock mutex is supposed to prevent collisions between reading
and writing to the line event kfifo but it's actually only taken when
the events are being read from it.

Drop the mutex entirely and reuse the spinlock made available to us in
the waitqueue struct. Take the lock whenever the fifo is modified or
inspected. Drop the call to kfifo_to_user() and instead first extract
the new element from kfifo when the lock is taken and only then pass
it on to the user after the spinlock is released.
Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
parent 5195a89e
...@@ -787,8 +787,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) ...@@ -787,8 +787,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
* @irq: the interrupt that trigger in response to events on this GPIO * @irq: the interrupt that trigger in response to events on this GPIO
* @wait: wait queue that handles blocking reads of events * @wait: wait queue that handles blocking reads of events
* @events: KFIFO for the GPIO events * @events: KFIFO for the GPIO events
* @read_lock: mutex lock to protect reads from colliding with adding
* new events to the FIFO
* @timestamp: cache for the timestamp storing it between hardirq * @timestamp: cache for the timestamp storing it between hardirq
* and IRQ thread, used to bring the timestamp close to the actual * and IRQ thread, used to bring the timestamp close to the actual
* event * event
...@@ -801,7 +799,6 @@ struct lineevent_state { ...@@ -801,7 +799,6 @@ struct lineevent_state {
int irq; int irq;
wait_queue_head_t wait; wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16); DECLARE_KFIFO(events, struct gpioevent_data, 16);
struct mutex read_lock;
u64 timestamp; u64 timestamp;
}; };
...@@ -817,7 +814,7 @@ static __poll_t lineevent_poll(struct file *filep, ...@@ -817,7 +814,7 @@ static __poll_t lineevent_poll(struct file *filep,
poll_wait(filep, &le->wait, wait); poll_wait(filep, &le->wait, wait);
if (!kfifo_is_empty(&le->events)) if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
events = EPOLLIN | EPOLLRDNORM; events = EPOLLIN | EPOLLRDNORM;
return events; return events;
...@@ -830,43 +827,52 @@ static ssize_t lineevent_read(struct file *filep, ...@@ -830,43 +827,52 @@ static ssize_t lineevent_read(struct file *filep,
loff_t *f_ps) loff_t *f_ps)
{ {
struct lineevent_state *le = filep->private_data; struct lineevent_state *le = filep->private_data;
unsigned int copied; struct gpioevent_data event;
ssize_t bytes_read = 0;
int ret; int ret;
if (count < sizeof(struct gpioevent_data)) if (count < sizeof(event))
return -EINVAL; return -EINVAL;
do { do {
spin_lock(&le->wait.lock);
if (kfifo_is_empty(&le->events)) { if (kfifo_is_empty(&le->events)) {
if (filep->f_flags & O_NONBLOCK) if (bytes_read) {
spin_unlock(&le->wait.lock);
return bytes_read;
}
if (filep->f_flags & O_NONBLOCK) {
spin_unlock(&le->wait.lock);
return -EAGAIN; return -EAGAIN;
}
ret = wait_event_interruptible(le->wait, ret = wait_event_interruptible_locked(le->wait,
!kfifo_is_empty(&le->events)); !kfifo_is_empty(&le->events));
if (ret) if (ret) {
spin_unlock(&le->wait.lock);
return ret; return ret;
}
} }
if (mutex_lock_interruptible(&le->read_lock)) ret = kfifo_out(&le->events, &event, 1);
return -ERESTARTSYS; spin_unlock(&le->wait.lock);
ret = kfifo_to_user(&le->events, buf, count, &copied); if (ret != 1) {
mutex_unlock(&le->read_lock); /*
* This should never happen - we were holding the lock
if (ret) * from the moment we learned the fifo is no longer
return ret; * empty until now.
*/
/* ret = -EIO;
* If we couldn't read anything from the fifo (a different break;
* thread might have been faster) we either return -EAGAIN if }
* the file descriptor is non-blocking, otherwise we go back to
* sleep and wait for more data to arrive.
*/
if (copied == 0 && (filep->f_flags & O_NONBLOCK))
return -EAGAIN;
} while (copied == 0); if (copy_to_user(buf + bytes_read, &event, sizeof(event)))
return -EFAULT;
bytes_read += sizeof(event);
} while (count >= bytes_read + sizeof(event));
return copied; return bytes_read;
} }
static int lineevent_release(struct inode *inode, struct file *filep) static int lineevent_release(struct inode *inode, struct file *filep)
...@@ -968,7 +974,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) ...@@ -968,7 +974,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_NONE; return IRQ_NONE;
} }
ret = kfifo_put(&le->events, ge); ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
1, &le->wait.lock);
if (ret) if (ret)
wake_up_poll(&le->wait, EPOLLIN); wake_up_poll(&le->wait, EPOLLIN);
...@@ -1083,7 +1090,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) ...@@ -1083,7 +1090,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
INIT_KFIFO(le->events); INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait); init_waitqueue_head(&le->wait);
mutex_init(&le->read_lock);
/* Request a thread to read the events */ /* Request a thread to read the events */
ret = request_threaded_irq(le->irq, ret = request_threaded_irq(le->irq,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment