Commit 95e5fda3 authored by Eric Pilmore, committed by Vinod Koul

ptdma: pt_core_execute_cmd() should use spinlock

The interrupt handler (pt_core_irq_handler()) of the ptdma
driver can be called from interrupt context. The code flow
in this function can lead down to pt_core_execute_cmd() which
will attempt to grab a mutex, which is not appropriate in
interrupt context and ultimately leads to a kernel panic.
The fix here changes this mutex to a spinlock, which has
been verified to resolve the issue.

Fixes: fa5d823b ("dmaengine: ptdma: Initial driver for the AMD PTDMA")
Signed-off-by: Eric Pilmore <epilmore@gigaio.com>
Link: https://lore.kernel.org/r/20230119033907.35071-1-epilmore@gigaio.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent a7a7ee6f
...@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd ...@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0); bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx]; u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail; u32 tail;
unsigned long flags;
if (soc) { if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0); desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC; desc->dw0 &= ~DWORD0_SOC;
} }
mutex_lock(&cmd_q->q_mutex); spin_lock_irqsave(&cmd_q->q_lock, flags);
/* Copy 32-byte command descriptor to hw queue. */ /* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32); memcpy(q_desc, desc, 32);
...@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd ...@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
/* Turn the queue back on using our cached control register */ /* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q); pt_start_queue(cmd_q);
mutex_unlock(&cmd_q->q_mutex); spin_unlock_irqrestore(&cmd_q->q_lock, flags);
return 0; return 0;
} }
...@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt) ...@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
cmd_q->pt = pt; cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool; cmd_q->dma_pool = dma_pool;
mutex_init(&cmd_q->q_mutex); spin_lock_init(&cmd_q->q_lock);
/* Page alignment satisfies our needs for N <= 128 */ /* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
......
...@@ -196,7 +196,7 @@ struct pt_cmd_queue { ...@@ -196,7 +196,7 @@ struct pt_cmd_queue {
struct ptdma_desc *qbase; struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */ /* Aligned queue start address (per requirement) */
struct mutex q_mutex ____cacheline_aligned; spinlock_t q_lock ____cacheline_aligned;
unsigned int qidx; unsigned int qidx;
unsigned int qsize; unsigned int qsize;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment