Commit c1038165 authored by David Jander, committed by Mark Brown

spi: Lock controller idling transition inside the io_mutex

This way, the spi sync path does not need to deal with the idling
transition.
Signed-off-by: David Jander <david@protonic.nl>
Link: https://lore.kernel.org/r/20220621061234.3626638-4-david@protonic.nl
Signed-off-by: Mark Brown <broonie@kernel.org>
parent ae7d2346
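
The patch moves the io_mutex acquisition from just around __spi_pump_transfer_message() to the top of __spi_pump_messages(), so the idling teardown also runs under it, and every early exit funnels through a single out_unlock label. Below is a minimal userspace sketch of that locking pattern, with pthread mutexes standing in for the kernel's mutex and spinlock; all names here (fake_ctlr, pump_messages, and so on) are hypothetical stand-ins for illustration, not the kernel's actual API.

    /*
     * Sketch only: pthread mutexes model ctlr->io_mutex and
     * ctlr->queue_lock; the struct and function names are invented.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_ctlr {
            pthread_mutex_t io_mutex;   /* serializes bus I/O and idling */
            pthread_mutex_t queue_lock; /* protects the fields below */
            bool busy;
            int queued;                 /* stand-in for list_empty(&queue) */
    };

    static void pump_messages(struct fake_ctlr *ctlr)
    {
            /* Take the IO mutex first: idling now happens under it too */
            pthread_mutex_lock(&ctlr->io_mutex);

            pthread_mutex_lock(&ctlr->queue_lock);
            if (ctlr->queued == 0) {
                    if (!ctlr->busy) {
                            pthread_mutex_unlock(&ctlr->queue_lock);
                            goto out_unlock; /* early exits share one unlock */
                    }
                    /* Idle the controller while still holding io_mutex */
                    ctlr->busy = false;
                    pthread_mutex_unlock(&ctlr->queue_lock);
                    /* ...non-atomic teardown would run here... */
                    goto out_unlock;
            }
            ctlr->queued--;
            ctlr->busy = true;
            pthread_mutex_unlock(&ctlr->queue_lock);

            printf("transferring one message\n");
            pthread_mutex_unlock(&ctlr->io_mutex); /* success path unlock */
            return;

    out_unlock:
            pthread_mutex_unlock(&ctlr->io_mutex);
    }

    int main(void)
    {
            struct fake_ctlr ctlr = {
                    .io_mutex = PTHREAD_MUTEX_INITIALIZER,
                    .queue_lock = PTHREAD_MUTEX_INITIALIZER,
                    .queued = 1,
            };

            pump_messages(&ctlr); /* transfers the queued message */
            pump_messages(&ctlr); /* queue empty: idles under io_mutex */
            return 0;
    }

Taking the outer lock before inspecting queue state means a sync-path caller holding io_mutex can never observe the controller halfway through the idling transition; the trade-off is that even empty-queue exits now briefly take the mutex.
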
@@ -1643,27 +1643,30 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	unsigned long flags;
 	int ret;
 
+	/* Take the IO mutex */
+	mutex_lock(&ctlr->io_mutex);
+
 	/* Lock queue */
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/* Make sure we are not already running a message */
 	if (ctlr->cur_msg) {
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* Check if the queue is idle */
 	if (list_empty(&ctlr->queue) || !ctlr->running) {
 		if (!ctlr->busy) {
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-			return;
+			goto out_unlock;
 		}
 
 		/* Defer any non-atomic teardown to the thread */
@@ -1679,7 +1682,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 					   &ctlr->pump_messages);
 		}
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	ctlr->busy = false;
@@ -1701,7 +1704,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		ctlr->idling = false;
 		ctlr->queue_empty = true;
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* Extract head of queue */
@@ -1715,13 +1718,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	ctlr->busy = true;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	mutex_lock(&ctlr->io_mutex);
 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
 	if (!ret)
 		cond_resched();
+	return;
+
+out_unlock:
+	mutex_unlock(&ctlr->io_mutex);
 }
 
 /**