Commit ae324b27 authored by Mark Brown


Merge remote-tracking branches 'spi/topic/a3700', 'spi/topic/atmel', 'spi/topic/bcm53xx', 'spi/topic/davinci' and 'spi/topic/dw' into spi-next
......@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
#define A3700_SPI_MAX_SPEED_HZ 100000000
#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offsets */
......@@ -184,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= A3700_SPI_FIFO_MODE;
if (enable)
val |= A3700_SPI_FIFO_MODE;
else
val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
......@@ -297,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi);
a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
......@@ -416,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
byte_len = xfer->bits_per_word >> 3;
/* Use 4-byte-long transfers. Each transfer method has its own way to
* deal with the remaining bytes of transfers that are not 4-byte aligned.
*/
a3700_spi_bytelen_set(a3700_spi, 4);
a3700_spi_fifo_thres_set(a3700_spi, byte_len);
/* Initialize the working buffers */
a3700_spi->tx_buf = xfer->tx_buf;
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
......@@ -491,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
......@@ -514,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
u32 data = le32_to_cpu(val);
memcpy(a3700_spi->rx_buf, &data, 4);
memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
......@@ -579,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
a3700_spi_bytelen_set(a3700_spi, 4);
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
static int a3700_spi_transfer_one(struct spi_master *master,
static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
unsigned int nbits = 0;
unsigned int nbits = 0, byte_len;
u32 val;
a3700_spi_transfer_setup(spi, xfer);
/* Make sure we use FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, true);
a3700_spi->tx_buf = xfer->tx_buf;
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
/* Configure FIFO thresholds */
byte_len = xfer->bits_per_word >> 3;
a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
......@@ -615,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
/* Clear WFIFO, since its last 2 bytes are shifted out during
* a read operation
*/
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
......@@ -729,6 +742,63 @@ static int a3700_spi_transfer_one(struct spi_master *master,
return ret;
}
static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
u32 val;
/* Disable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, false);
while (a3700_spi->buf_len) {
/* When we have fewer than 4 bytes left to transfer, switch to 1-byte
* mode. This is reset after each transfer.
*/
if (a3700_spi->buf_len < 4)
a3700_spi_bytelen_set(a3700_spi, 1);
if (a3700_spi->byte_len == 1)
val = *a3700_spi->tx_buf;
else
val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
/* Wait for all the data to be shifted in / out */
while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
A3700_SPI_XFER_DONE))
cpu_relax();
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
a3700_spi->buf_len -= a3700_spi->byte_len;
a3700_spi->tx_buf += a3700_spi->byte_len;
a3700_spi->rx_buf += a3700_spi->byte_len;
}
spi_finalize_current_transfer(master);
return 0;
}
static int a3700_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
a3700_spi_transfer_setup(spi, xfer);
if (xfer->tx_buf && xfer->rx_buf)
return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
return a3700_spi_transfer_one_fifo(master, spi, xfer);
}
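With SPI_MASTER_HALF_DUPLEX removed later in this diff, a transfer that sets both tx_buf and rx_buf is no longer rejected by the SPI core and is routed to the new full-duplex path above. A minimal, hypothetical client-side sketch of such a transfer (example_full_duplex_xfer() is illustrative only and not part of this commit; the full-duplex path is PIO, so stack buffers are acceptable here):

#include <linux/spi/spi.h>

static int example_full_duplex_xfer(struct spi_device *spi)
{
        u8 tx[4] = { 0x9f, 0x00, 0x00, 0x00 };  /* arbitrary example bytes */
        u8 rx[4] = { 0 };
        struct spi_transfer xfer = {
                .tx_buf = tx,
                .rx_buf = rx,
                .len = sizeof(tx),
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfer, &msg);

        /* Both buffers are set, so a3700_spi_transfer_one() takes the
         * full-duplex branch instead of the FIFO one.
         */
        return spi_sync(spi, &msg);
}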
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
......@@ -778,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
......@@ -818,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
clk_get_rate(spi->clk));
master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
A3700_SPI_MAX_PRESCALE);
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
......
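As a quick check of the speed limits set in the probe hunk above, assume a hypothetical 200 MHz SPI functional clock: max_speed_hz = min(A3700_SPI_MAX_SPEED_HZ, clk rate) = min(100000000, 200000000) = 100 MHz, while min_speed_hz = DIV_ROUND_UP(200000000, A3700_SPI_MAX_PRESCALE) = DIV_ROUND_UP(200000000, 30) = 6666667 Hz, i.e. about 6.67 MHz.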
......@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
dma_addr_t dma_addr_rx_bbuf;
dma_addr_t dma_addr_tx_bbuf;
void *addr_rx_bbuf;
void *addr_tx_bbuf;
struct completion xfer_completion;
......@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
{
return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
}
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
......@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
return atmel_spi_use_dma(as, xfer);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
return atmel_spi_use_dma(as, xfer) &&
!atmel_spi_is_vmalloc_xfer(xfer);
else
return atmel_spi_use_dma(as, xfer);
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
......@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
as->current_transfer->len);
}
complete(&as->xfer_completion);
}
......@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
rxdesc = dmaengine_prep_slave_sg(rxchan,
xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (atmel_spi_is_vmalloc_xfer(xfer) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
rxdesc = dmaengine_prep_slave_single(rxchan,
as->dma_addr_rx_bbuf,
xfer->len,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
} else {
rxdesc = dmaengine_prep_slave_sg(rxchan,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
}
if (!rxdesc)
goto err_dma;
txdesc = dmaengine_prep_slave_sg(txchan,
xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (atmel_spi_is_vmalloc_xfer(xfer) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
txdesc = dmaengine_prep_slave_single(txchan,
as->dma_addr_tx_bbuf,
xfer->len, DMA_TO_DEVICE,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
} else {
txdesc = dmaengine_prep_slave_sg(txchan,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
}
if (!txdesc)
goto err_dma;
......@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
#ifdef CONFIG_SOC_SAM_V4_V5
/*
* Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
* since this latter function tries to map buffers with dma_map_sg()
* even if they have not been allocated inside DMA-safe areas.
* On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
* those ARM cores, the data cache follows the PIPT model.
* Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
* In case of PIPT caches, there cannot be cache aliases.
* However on ARM9 cores, the data cache follows the VIVT model, hence
* the cache aliases issue can occur when buffers are allocated from
* DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
* not taken into account or at least not handled completely (cache
* lines of aliases are not invalidated).
* This is not a theoretical issue: it was reproduced when trying to mount
* a UBI file-system on a at91sam9g35ek board.
*/
as->caps.has_dma_support = false;
#else
as->caps.has_dma_support = version >= 0x212;
#endif
as->caps.has_pdc_support = version < 0x212;
}
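The comment removed above documents the VIVT cache-alias problem with vmalloc()'ed buffers on ARM9-based (SAM9x) parts; instead of disabling DMA entirely, the rest of this diff copies such buffers through DMA-coherent bounce buffers. A tiny illustrative sketch of the check this is built around (example_needs_bounce() is hypothetical, not part of the commit):

#include <linux/mm.h>           /* is_vmalloc_addr() */

/* Only SAM9x-class SoCs (VIVT D-cache) need to bounce vmalloc()'ed
 * buffers; lowmem buffers from kmalloc() can still be DMA-mapped
 * directly.  This mirrors atmel_spi_is_vmalloc_xfer() combined with
 * the IS_ENABLED(CONFIG_SOC_SAM_V4_V5) checks added in this diff.
 */
static bool example_needs_bounce(const void *buf)
{
        return IS_ENABLED(CONFIG_SOC_SAM_V4_V5) && is_vmalloc_addr(buf);
}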
......@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
SPI_MAX_DMA_XFER,
&as->dma_addr_rx_bbuf,
GFP_KERNEL | GFP_DMA);
if (!as->addr_rx_bbuf) {
as->use_dma = false;
} else {
as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
SPI_MAX_DMA_XFER,
&as->dma_addr_tx_bbuf,
GFP_KERNEL | GFP_DMA);
if (!as->addr_tx_bbuf) {
as->use_dma = false;
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_rx_bbuf,
as->dma_addr_rx_bbuf);
}
}
if (!as->use_dma)
dev_info(master->dev.parent,
" can not allocate dma coherent memory\n");
}
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
......@@ -1664,6 +1711,14 @@ static int atmel_spi_remove(struct platform_device *pdev)
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_tx_bbuf,
as->dma_addr_tx_bbuf);
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_rx_bbuf,
as->dma_addr_rx_bbuf);
}
}
spin_lock_irq(&as->lock);
......
......@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
......@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
......@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
for (i = 0; i < b53spi->read_offset + len; i++) {
for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
if (!cont && i == b53spi->read_offset + len - 1)
if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
......@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
b53spi->read_offset + len - 1);
bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
......@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
int offset = b53spi->read_offset + i;
u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
......@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
bool cont = left - to_write > 0;
bool cont = !spi_transfer_is_last(master, t) ||
left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
......@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
left);
bool cont = left - to_read > 0;
size_t to_read = min_t(size_t, 16, left);
bool cont = !spi_transfer_is_last(master, t) ||
left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
......
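The reworked cont logic above uses spi_transfer_is_last() so that chip select is released only after the final chunk of the last transfer in a message. A hypothetical client-side usage that relies on that behaviour (example_cmd_then_read() is not from this commit):

#include <linux/spi/spi.h>

static int example_cmd_then_read(struct spi_device *spi, u8 cmd,
                                 u8 *resp, size_t resp_len)
{
        struct spi_transfer xfers[2] = {
                { .tx_buf = &cmd, .len = 1 },           /* command byte */
                { .rx_buf = resp, .len = resp_len },    /* response */
        };

        /* CS must stay asserted between the two transfers; the driver
         * now keeps "cont" set until the last chunk of the last one.
         */
        return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}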
......@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
init_completion(&dspi->done);
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
......@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
init_completion(&dspi->done);
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
......
......@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
......