Commit b42e0359 authored by Geert Uytterhoeven, committed by Mark Brown

spi: rspi: Use core SPI_MASTER_MUST_[RT]X handling

RSPI needs dummy transfers to generate the SPI clock on receive.
RSPI-RZ and QSPI always do both transmit and receive.

Use the SPI core SPI_MASTER_MUST_RX/SPI_MASTER_MUST_TX infrastructure
instead of checking for the presence of buffers and providing dummy data
ourselves (for PIO), or providing a dummy buffer (for DMA).
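For context, a driver that sets these flags only declares the requirement; before a transfer reaches the driver, the SPI core substitutes a shared, zero-filled dummy buffer for any missing tx_buf (and, with SPI_MASTER_MUST_RX, a dummy rx_buf). The sketch below illustrates that core behaviour in simplified form; the helper name spi_core_fill_dummy_buffers() is invented for illustration only, and the real handling lives in the SPI core (drivers/spi/spi.c), not in this driver.

/*
 * Illustrative sketch only: roughly what the SPI core does for controllers
 * that set SPI_MASTER_MUST_TX / SPI_MASTER_MUST_RX. Helper name is made up;
 * the real logic sits in the core's message-mapping path.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>

static int spi_core_fill_dummy_buffers(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;
	size_t max_tx = 0, max_rx = 0;
	void *tmp;

	/* find the largest transfer that is missing a tx_buf or rx_buf */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if ((master->flags & SPI_MASTER_MUST_TX) && !xfer->tx_buf &&
		    xfer->len > max_tx)
			max_tx = xfer->len;
		if ((master->flags & SPI_MASTER_MUST_RX) && !xfer->rx_buf &&
		    xfer->len > max_rx)
			max_rx = xfer->len;
	}

	/* one shared, zero-filled dummy buffer per direction */
	if (max_tx) {
		tmp = krealloc(master->dummy_tx, max_tx, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		master->dummy_tx = tmp;
		memset(tmp, 0, max_tx);
	}
	if (max_rx) {
		tmp = krealloc(master->dummy_rx, max_rx, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		master->dummy_rx = tmp;
	}

	/* drivers such as rspi then always see valid buffers */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->tx_buf && (master->flags & SPI_MASTER_MUST_TX))
			xfer->tx_buf = master->dummy_tx;
		if (!xfer->rx_buf && (master->flags & SPI_MASTER_MUST_RX))
			xfer->rx_buf = master->dummy_rx;
	}

	return 0;
}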

rspi_receive_dma() now provides full duplex DMA transfers on RSPI, and is
renamed to rspi_send_receive_dma().

As the SPI core will always provide a TX buffer, the logic to choose
between DMA send and DMA send/receive in rspi_transfer_one() now has to
check for the presence of an RX buffer. Likewise for the DMA availability
tests in rspi_is_dma().
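Condensed from the diff below, the resulting DMA selection in rspi_transfer_one() reads:

static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_master_get_devdata(master);

	if (!rspi_is_dma(rspi, xfer))
		return rspi_transfer_out_in(rspi, xfer);	/* PIO path */

	/* tx_buf is always valid here thanks to SPI_MASTER_MUST_TX */
	if (xfer->rx_buf)
		return rspi_send_receive_dma(rspi, xfer);	/* full duplex DMA */

	return rspi_send_dma(rspi, xfer);			/* transmit-only DMA */
}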

The buffer tests in qspi_transfer_one() are now always true, so they're
removed.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Mark Brown <broonie@linaro.org>
parent 9c5de2c1
@@ -183,8 +183,6 @@
 #define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
 #define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
 
-#define DUMMY_DATA 0x00
-
 struct rspi_data {
 	void __iomem *addr;
 	u32 max_speed_hz;
@@ -252,6 +250,7 @@ struct spi_ops {
 	int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
 			    struct spi_transfer *xfer);
 	u16 mode_bits;
+	u16 flags;
 };
 
 /*
@@ -552,42 +551,38 @@ static void qspi_receive_init(const struct rspi_data *rspi)
 	rspi_write8(rspi, 0, QSPI_SPBFCR);
 }
 
-static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int rspi_send_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
 {
-	struct scatterlist sg, sg_dummy;
-	void *dummy = NULL, *rx_buf = t->rx_buf;
-	struct dma_async_tx_descriptor *desc, *desc_dummy;
+	struct scatterlist sg_rx, sg_tx;
+	const void *tx_buf = t->tx_buf;
+	void *rx_buf = t->rx_buf;
+	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
 	unsigned int len = t->len;
 	int ret = 0;
 
-	/* prepare dummy transfer to generate SPI clocks */
-	dummy = kzalloc(len, GFP_KERNEL);
-	if (!dummy) {
-		ret = -ENOMEM;
-		goto end_nomap;
-	}
-	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
-			     DMA_TO_DEVICE)) {
-		ret = -EFAULT;
-		goto end_nomap;
-	}
-	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
+	/* prepare transmit transfer */
+	if (!rspi_dma_map_sg(&sg_tx, tx_buf, len, rspi->chan_tx,
+			     DMA_TO_DEVICE))
+		return -EFAULT;
+
+	desc_tx = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_tx, 1,
 			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc_dummy) {
+	if (!desc_tx) {
 		ret = -EIO;
-		goto end_dummy_mapped;
+		goto end_tx_mapped;
 	}
 
 	/* prepare receive transfer */
-	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
+	if (!rspi_dma_map_sg(&sg_rx, rx_buf, len, rspi->chan_rx,
 			     DMA_FROM_DEVICE)) {
 		ret = -EFAULT;
-		goto end_dummy_mapped;
+		goto end_tx_mapped;
 	}
 
-	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc) {
+	desc_rx = dmaengine_prep_slave_sg(rspi->chan_rx, &sg_rx, 1,
+					  DMA_FROM_DEVICE,
+					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_rx) {
 		ret = -EIO;
 		goto end;
 	}
@@ -606,13 +601,13 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
 
 	rspi->dma_callbacked = 0;
-	desc->callback = rspi_dma_complete;
-	desc->callback_param = rspi;
-	dmaengine_submit(desc);
+	desc_rx->callback = rspi_dma_complete;
+	desc_rx->callback_param = rspi;
+	dmaengine_submit(desc_rx);
 	dma_async_issue_pending(rspi->chan_rx);
 
-	desc_dummy->callback = NULL; /* No callback */
-	dmaengine_submit(desc_dummy);
+	desc_tx->callback = NULL; /* No callback */
+	dmaengine_submit(desc_tx);
 	dma_async_issue_pending(rspi->chan_tx);
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
@@ -628,21 +623,19 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
 	enable_irq(rspi->rx_irq);
 
 end:
-	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
-end_dummy_mapped:
-	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
-end_nomap:
-	kfree(dummy);
+	rspi_dma_unmap_sg(&sg_rx, rspi->chan_rx, DMA_FROM_DEVICE);
+end_tx_mapped:
+	rspi_dma_unmap_sg(&sg_tx, rspi->chan_tx, DMA_TO_DEVICE);
 	return ret;
 }
 
 static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
 {
-	if (t->tx_buf && rspi->chan_tx)
-		return 1;
-
 	/* If the module receives data by DMAC, it also needs TX DMAC */
-	if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
+	if (t->rx_buf)
+		return rspi->chan_tx && rspi->chan_rx;
+
+	if (rspi->chan_tx)
 		return 1;
 
 	return 0;
@@ -654,7 +647,7 @@ static int rspi_transfer_out_in(struct rspi_data *rspi,
 	int remain = xfer->len, ret;
 	const u8 *tx_buf = xfer->tx_buf;
 	u8 *rx_buf = xfer->rx_buf;
-	u8 spcr, data;
+	u8 spcr;
 
 	spcr = rspi_read8(rspi, RSPI_SPCR);
 	if (rx_buf) {
@@ -666,8 +659,7 @@ static int rspi_transfer_out_in(struct rspi_data *rspi,
 	rspi_write8(rspi, spcr, RSPI_SPCR);
 
 	while (remain > 0) {
-		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
-		ret = rspi_data_out(rspi, data);
+		ret = rspi_data_out(rspi, *tx_buf++);
 		if (ret < 0)
 			return ret;
 		if (rx_buf) {
@@ -689,20 +681,14 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 			     struct spi_transfer *xfer)
 {
 	struct rspi_data *rspi = spi_master_get_devdata(master);
-	int ret;
 
 	if (!rspi_is_dma(rspi, xfer))
 		return rspi_transfer_out_in(rspi, xfer);
 
-	if (xfer->tx_buf) {
-		ret = rspi_send_dma(rspi, xfer);
-		if (ret < 0)
-			return ret;
-	}
 	if (xfer->rx_buf)
-		return rspi_receive_dma(rspi, xfer);
-	else
-		return 0;
+		return rspi_send_receive_dma(rspi, xfer);
+
+	return rspi_send_dma(rspi, xfer);
 }
 
 static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
@@ -711,16 +697,13 @@ static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
 	int remain = xfer->len, ret;
 	const u8 *tx_buf = xfer->tx_buf;
 	u8 *rx_buf = xfer->rx_buf;
-	u8 data;
 
 	rspi_rz_receive_init(rspi);
 
 	while (remain > 0) {
-		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
-		ret = rspi_data_out_in(rspi, data);
+		ret = rspi_data_out_in(rspi, *tx_buf++);
 		if (ret < 0)
 			return ret;
-		if (rx_buf)
-			*rx_buf++ = ret;
+		*rx_buf++ = ret;
 		remain--;
 	}
@@ -746,16 +729,13 @@ static int qspi_transfer_out_in(struct rspi_data *rspi,
 	int remain = xfer->len, ret;
 	const u8 *tx_buf = xfer->tx_buf;
 	u8 *rx_buf = xfer->rx_buf;
-	u8 data;
 
 	qspi_receive_init(rspi);
 
 	while (remain > 0) {
-		data = tx_buf ? *tx_buf++ : DUMMY_DATA;
-		ret = rspi_data_out_in(rspi, data);
+		ret = rspi_data_out_in(rspi, *tx_buf++);
 		if (ret < 0)
 			return ret;
-		if (rx_buf)
-			*rx_buf++ = ret;
+		*rx_buf++ = ret;
 		remain--;
 	}
@@ -807,10 +787,10 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
 
 	if (spi->mode & SPI_LOOP) {
 		return qspi_transfer_out_in(rspi, xfer);
-	} else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) {
+	} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
 		/* Quad or Dual SPI Write */
 		return qspi_transfer_out(rspi, xfer);
-	} else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) {
+	} else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
 		/* Quad or Dual SPI Read */
 		return qspi_transfer_in(rspi, xfer);
 	} else {
@@ -1064,12 +1044,14 @@ static const struct spi_ops rspi_ops = {
 	.set_config_register = rspi_set_config_register,
 	.transfer_one = rspi_transfer_one,
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+	.flags = SPI_MASTER_MUST_TX,
 };
 
 static const struct spi_ops rspi_rz_ops = {
 	.set_config_register = rspi_rz_set_config_register,
 	.transfer_one = rspi_rz_transfer_one,
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+	.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
 };
 
 static const struct spi_ops qspi_ops = {
@@ -1078,6 +1060,7 @@ static const struct spi_ops qspi_ops = {
 	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
 		     SPI_TX_DUAL | SPI_TX_QUAD |
 		     SPI_RX_DUAL | SPI_RX_QUAD,
+	.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
 };
 
 #ifdef CONFIG_OF
@@ -1197,6 +1180,7 @@ static int rspi_probe(struct platform_device *pdev)
 	master->prepare_message = rspi_prepare_message;
 	master->unprepare_message = rspi_unprepare_message;
 	master->mode_bits = ops->mode_bits;
+	master->flags = ops->flags;
 	master->dev.of_node = pdev->dev.of_node;
 
 	ret = platform_get_irq_byname(pdev, "rx");
...