Commit 98e58b01 authored by Hauke Mehrtens, committed by Ralf Baechle

MIPS: Lantiq: Lock DMA register accesses for SMP

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration in other registers. This has to be done as an atomic
operation. Previously only the local interrupts were deactivated, which
works on single-CPU systems. If the system supports SMP, stronger
locking is needed; use spinlocks instead.

On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there we could use one area
per CPU and would not have to synchronize between the CPUs any more.
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Cc: john@phrozen.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14912/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent b3d91db3
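The race the patch closes is the classic indirect-register pattern: one write selects a channel in LTQ_DMA_CS, the following writes program that channel, and a second CPU rewriting LTQ_DMA_CS in between would make those writes land on the wrong channel. Disabling local interrupts only excludes code running on the current CPU, so on SMP a lock must be held across the whole sequence. Below is a minimal sketch of the pattern, assuming the accessors and register macros from the diff; the function name is hypothetical and for illustration only, not driver code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ltq_dma_lock);

/* Hypothetical example: make the select + program sequence atomic
 * against other CPUs and against local interrupt handlers.
 */
static void ltq_dma_program_example(struct ltq_dma_channel *ch)
{
	unsigned long flags;

	/* local_irq_save() would only stop this CPU; another CPU could
	 * still rewrite LTQ_DMA_CS between the two writes below.
	 */
	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);                 /* select channel */
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); /* program it */
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}

The irqsave variant is used rather than plain spin_lock() because the same registers can also be touched from interrupt context (e.g. via ltq_dma_ack_irq()), so the lock has to be taken with interrupts disabled to avoid self-deadlock on the local CPU.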
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -59,16 +60,17 @@
 				ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 				&ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);