Commit 81bbadc6 authored by Linus Torvalds

Merge tag 'spi-v3.17-rc3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi bugfixes from Mark Brown:
 "A smattering of bug fixes for the SPI subsystem, all in driver code
  which has seen active work recently and none of them with any great
  global impact.

  There's also a new ACPI ID for the pxa2xx driver which required no
  code changes and the addition of kerneldoc for some structure fields
  that were missing it and generating warnings during documentation
  builds as a result"

* tag 'spi-v3.17-rc3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: sh-msiof: Fix transmit-only DMA transfers
  spi/rockchip: Avoid accidentally turning off the clock
  spi: dw: fix kernel crash due to NULL pointer dereference
  spi: dw-pci: fix bug when regs left uninitialized
  spi: davinci: fix SPI_NO_CS functionality
  spi/rockchip: fixup incorrect dma direction setting
  spi/pxa2xx: Add ACPI ID for Intel Braswell
  spi: spi-au1550: fix build failure
  spi: rspi: Fix leaking of unused DMA descriptors
  spi: sh-msiof: Fix leaking of unused DMA descriptors
  spi: Add missing kerneldoc bits
  spi/omap-mcspi: Fix the spi task hangs waiting dma_rx
parents 35e27445 2dbf5910
drivers/spi/spi-au1550.c
@@ -945,7 +945,7 @@ static int au1550_spi_remove(struct platform_device *pdev)
 	spi_bitbang_stop(&hw->bitbang);
 	free_irq(hw->irq, hw);
 	iounmap((void __iomem *)hw->regs);
-	release_mem_region(r->start, sizeof(psc_spi_t));
+	release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
 	if (hw->usedma) {
 		au1550_spi_dma_rxtmp_free(hw);
drivers/spi/spi-davinci.c
@@ -417,16 +417,16 @@ static int davinci_spi_setup(struct spi_device *spi)
 					  flags, dev_name(&spi->dev));
 			internal_cs = false;
 		}
-	}
 
 	if (retval) {
 		dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
 			spi->cs_gpio, retval);
 		return retval;
 	}
 
 	if (internal_cs)
 		set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+	}
 
 	if (spi->mode & SPI_READY)
 		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
drivers/spi/spi-dw-pci.c
@@ -62,6 +62,8 @@ static int spi_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
+	dws->regs = pcim_iomap_table(pdev)[pci_bar];
+
 	dws->bus_num = 0;
 	dws->num_cs = 4;
 	dws->irq = pdev->irq;
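
Note on the dw-pci hunk: pcim_iomap_regions() only records the mapped BAR in the managed iomap table; nothing copies the address into dws->regs, so the core driver later dereferenced a NULL pointer. A minimal sketch of the same managed-PCI pattern, with my_dev/my_probe as illustrative names (only the pcim_*/devm_*/pci_* calls are the real API):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct my_dev {
	void __iomem *regs;	/* must be filled in before any readl()/writel() */
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_dev *priv;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request and ioremap BAR 0; cleanup is handled by devres. */
	ret = pcim_iomap_regions(pdev, 1 << 0, "my-driver");
	if (ret)
		return ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Without this assignment priv->regs stays NULL, which is exactly
	 * the class of bug the hunk above fixes. */
	priv->regs = pcim_iomap_table(pdev)[0];

	pci_set_drvdata(pdev, priv);
	return 0;
}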
drivers/spi/spi-dw.c
@@ -271,7 +271,7 @@ static void giveback(struct dw_spi *dws)
 					transfer_list);
 
 	if (!last_transfer->cs_change)
-		spi_chip_sel(dws, dws->cur_msg->spi, 0);
+		spi_chip_sel(dws, msg->spi, 0);
 
 	spi_finalize_current_message(dws->master);
 }
drivers/spi/spi-omap2-mcspi.c
@@ -329,7 +329,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
 disable_fifo:
 	if (t->rx_buf != NULL)
 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
-	else
+
+	if (t->tx_buf != NULL)
 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
 
 	mcspi_write_chconf0(spi, chconf);
drivers/spi/spi-pxa2xx.c
@@ -1074,6 +1074,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
 	{ "INT3430", 0 },
 	{ "INT3431", 0 },
 	{ "80860F0E", 0 },
+	{ "8086228E", 0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
drivers/spi/spi-rockchip.c
@@ -499,7 +499,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
 	}
 
 	/* div doesn't support odd number */
-	div = rs->max_freq / rs->speed;
+	div = max_t(u32, rs->max_freq / rs->speed, 1);
 	div = (div + 1) & 0xfffe;
 
 	spi_enable_chip(rs, 0);
@@ -678,7 +678,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
 	rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
 	rs->dma_tx.direction = DMA_MEM_TO_DEV;
-	rs->dma_tx.direction = DMA_DEV_TO_MEM;
+	rs->dma_rx.direction = DMA_DEV_TO_MEM;
 
 	master->can_dma = rockchip_spi_can_dma;
 	master->dma_tx = rs->dma_tx.ch;
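
The rockchip clock change is worth a second look. The controller's baud-rate divider only accepts even values (hence the in-tree comment), and before the fix a requested speed above rs->max_freq produced rs->max_freq / rs->speed == 0, which (0 + 1) & 0xfffe rounds straight back to 0 and silently gates the clock off. Clamping the quotient to at least 1 makes the smallest possible result 2. A stand-alone demo of the arithmetic (ordinary userspace C; the frequencies are made up):

#include <stdio.h>

/* Mirrors the fixed driver logic: clamp the quotient to >= 1, then round
 * odd values up to the next even divisor. */
static unsigned int spi_clk_div(unsigned int max_freq, unsigned int speed)
{
	unsigned int div = max_freq / speed;

	if (div < 1)		/* the fix: never let the quotient reach 0 */
		div = 1;
	return (div + 1) & 0xfffe;
}

int main(void)
{
	/* Asking for more than the controller can do used to yield 0
	 * (clock switched off); now it yields 2, the fastest legal setting. */
	printf("%u\n", spi_clk_div(50000000, 60000000));	/* 2 */
	printf("%u\n", spi_clk_div(50000000, 10000000));	/* 5 -> 6 */
	printf("%u\n", spi_clk_div(50000000, 12500000));	/* 4 stays 4 */
	return 0;
}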
drivers/spi/spi-rspi.c
@@ -472,25 +472,52 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 	dma_cookie_t cookie;
 	int ret;
 
-	if (tx) {
-		desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
-					tx->sgl, tx->nents, DMA_TO_DEVICE,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_tx)
-			goto no_dma;
-
-		irq_mask |= SPCR_SPTIE;
-	}
+	/* First prepare and submit the DMA request(s), as this may fail */
 	if (rx) {
 		desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
 					rx->sgl, rx->nents, DMA_FROM_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_rx)
-			goto no_dma;
+		if (!desc_rx) {
+			ret = -EAGAIN;
+			goto no_dma_rx;
+		}
+
+		desc_rx->callback = rspi_dma_complete;
+		desc_rx->callback_param = rspi;
+		cookie = dmaengine_submit(desc_rx);
+		if (dma_submit_error(cookie)) {
+			ret = cookie;
+			goto no_dma_rx;
+		}
 
 		irq_mask |= SPCR_SPRIE;
 	}
 
+	if (tx) {
+		desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+					tx->sgl, tx->nents, DMA_TO_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc_tx) {
+			ret = -EAGAIN;
+			goto no_dma_tx;
+		}
+
+		if (rx) {
+			/* No callback */
+			desc_tx->callback = NULL;
+		} else {
+			desc_tx->callback = rspi_dma_complete;
+			desc_tx->callback_param = rspi;
+		}
+		cookie = dmaengine_submit(desc_tx);
+		if (dma_submit_error(cookie)) {
+			ret = cookie;
+			goto no_dma_tx;
+		}
+
+		irq_mask |= SPCR_SPTIE;
+	}
+
 	/*
 	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
 	 * called. So, this driver disables the IRQ while DMA transfer.
@@ -503,34 +530,24 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 	rspi_enable_irq(rspi, irq_mask);
 	rspi->dma_callbacked = 0;
 
-	if (rx) {
-		desc_rx->callback = rspi_dma_complete;
-		desc_rx->callback_param = rspi;
-		cookie = dmaengine_submit(desc_rx);
-		if (dma_submit_error(cookie))
-			return cookie;
+	/* Now start DMA */
+	if (rx)
 		dma_async_issue_pending(rspi->master->dma_rx);
-	}
-	if (tx) {
-		if (rx) {
-			/* No callback */
-			desc_tx->callback = NULL;
-		} else {
-			desc_tx->callback = rspi_dma_complete;
-			desc_tx->callback_param = rspi;
-		}
-		cookie = dmaengine_submit(desc_tx);
-		if (dma_submit_error(cookie))
-			return cookie;
+	if (tx)
 		dma_async_issue_pending(rspi->master->dma_tx);
-	}
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
 	if (ret > 0 && rspi->dma_callbacked)
 		ret = 0;
-	else if (!ret)
+	else if (!ret) {
+		dev_err(&rspi->master->dev, "DMA timeout\n");
 		ret = -ETIMEDOUT;
+		if (tx)
+			dmaengine_terminate_all(rspi->master->dma_tx);
+		if (rx)
+			dmaengine_terminate_all(rspi->master->dma_rx);
+	}
 
 	rspi_disable_irq(rspi, irq_mask);
@@ -541,11 +558,16 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
 	return ret;
 
-no_dma:
-	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-		     dev_driver_string(&rspi->master->dev),
-		     dev_name(&rspi->master->dev));
-	return -EAGAIN;
+no_dma_tx:
+	if (rx)
+		dmaengine_terminate_all(rspi->master->dma_rx);
+no_dma_rx:
+	if (ret == -EAGAIN) {
+		pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+			     dev_driver_string(&rspi->master->dev),
+			     dev_name(&rspi->master->dev));
+	}
+	return ret;
 }
 
 static void rspi_receive_init(const struct rspi_data *rspi)
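
The rspi rework above and the sh-msiof rework below apply the same dmaengine rule: a descriptor returned by dmaengine_prep_slave_sg() is leaked if the function bails out before dmaengine_submit(), and a descriptor that was submitted but never issued has to be torn down with dmaengine_terminate_all(). Preparing and submitting one direction completely before touching the other keeps the error unwinding to one step per channel. A condensed sketch of that ordering (the helper name and its parameters are illustrative; the dmaengine_* calls are the real API):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int start_duplex_dma(struct dma_chan *chan_rx, struct scatterlist *sg_rx,
			    unsigned int nents_rx,
			    struct dma_chan *chan_tx, struct scatterlist *sg_tx,
			    unsigned int nents_tx)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Rx first: prepare *and* submit, so a later Tx failure only has
	 * one channel to unwind. */
	desc = dmaengine_prep_slave_sg(chan_rx, sg_rx, nents_rx,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EAGAIN;
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;

	desc = dmaengine_prep_slave_sg(chan_tx, sg_tx, nents_tx,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dmaengine_terminate_all(chan_rx);	/* drop the queued Rx */
		return -EAGAIN;
	}
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dmaengine_terminate_all(chan_rx);
		return cookie;
	}

	/* Only now let the hardware run; nothing can fail past this point. */
	dma_async_issue_pending(chan_rx);
	dma_async_issue_pending(chan_tx);
	return 0;
}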
drivers/spi/spi-sh-msiof.c
@@ -636,48 +636,38 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 	dma_cookie_t cookie;
 	int ret;
 
-	if (tx) {
-		ier_bits |= IER_TDREQE | IER_TDMAE;
-		dma_sync_single_for_device(p->master->dma_tx->device->dev,
-					   p->tx_dma_addr, len, DMA_TO_DEVICE);
-		desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
-					p->tx_dma_addr, len, DMA_TO_DEVICE,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_tx)
-			return -EAGAIN;
-	}
-
+	/* First prepare and submit the DMA request(s), as this may fail */
 	if (rx) {
 		ier_bits |= IER_RDREQE | IER_RDMAE;
 		desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
 					p->rx_dma_addr, len, DMA_FROM_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_rx)
-			return -EAGAIN;
-	}
-
-	/* 1 stage FIFO watermarks for DMA */
-	sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
-
-	/* setup msiof transfer mode registers (32-bit words) */
-	sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
-
-	sh_msiof_write(p, IER, ier_bits);
-
-	reinit_completion(&p->done);
+		if (!desc_rx) {
+			ret = -EAGAIN;
+			goto no_dma_rx;
+		}
 
-	if (rx) {
 		desc_rx->callback = sh_msiof_dma_complete;
 		desc_rx->callback_param = p;
 		cookie = dmaengine_submit(desc_rx);
 		if (dma_submit_error(cookie)) {
 			ret = cookie;
-			goto stop_ier;
+			goto no_dma_rx;
 		}
-		dma_async_issue_pending(p->master->dma_rx);
 	}
 
 	if (tx) {
+		ier_bits |= IER_TDREQE | IER_TDMAE;
+		dma_sync_single_for_device(p->master->dma_tx->device->dev,
+					   p->tx_dma_addr, len, DMA_TO_DEVICE);
+		desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
+					p->tx_dma_addr, len, DMA_TO_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc_tx) {
+			ret = -EAGAIN;
+			goto no_dma_tx;
+		}
+
 		if (rx) {
 			/* No callback */
 			desc_tx->callback = NULL;
@@ -688,15 +678,30 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 		cookie = dmaengine_submit(desc_tx);
 		if (dma_submit_error(cookie)) {
 			ret = cookie;
-			goto stop_rx;
+			goto no_dma_tx;
 		}
-		dma_async_issue_pending(p->master->dma_tx);
 	}
 
+	/* 1 stage FIFO watermarks for DMA */
+	sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+
+	/* setup msiof transfer mode registers (32-bit words) */
+	sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+
+	sh_msiof_write(p, IER, ier_bits);
+
+	reinit_completion(&p->done);
+
+	/* Now start DMA */
+	if (rx)
+		dma_async_issue_pending(p->master->dma_rx);
+	if (tx)
+		dma_async_issue_pending(p->master->dma_tx);
+
 	ret = sh_msiof_spi_start(p, rx);
 	if (ret) {
 		dev_err(&p->pdev->dev, "failed to start hardware\n");
-		goto stop_tx;
+		goto stop_dma;
 	}
 
 	/* wait for tx fifo to be emptied / rx fifo to be filled */
@@ -726,14 +731,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 stop_reset:
 	sh_msiof_reset_str(p);
 	sh_msiof_spi_stop(p, rx);
-stop_tx:
+stop_dma:
 	if (tx)
 		dmaengine_terminate_all(p->master->dma_tx);
-stop_rx:
+no_dma_tx:
 	if (rx)
 		dmaengine_terminate_all(p->master->dma_rx);
-stop_ier:
 	sh_msiof_write(p, IER, 0);
+no_dma_rx:
 	return ret;
 }
drivers/spi/spi.c
@@ -848,6 +848,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 
 /**
  * spi_finalize_current_transfer - report completion of a transfer
+ * @master: the master reporting completion
  *
  * Called by SPI drivers using the core transfer_one_message()
  * implementation to notify it that the current interrupt driven
include/linux/spi/spi.h
@@ -253,6 +253,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	the device whose settings are being modified.
  * @transfer: adds a message to the controller's transfer queue.
  * @cleanup: frees controller-specific state
+ * @can_dma: determine whether this master supports DMA
  * @queued: whether this master is providing an internal message queue
  * @kworker: thread struct for message pump
  * @kworker_task: pointer to task for message pump kworker thread
@@ -262,6 +263,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @cur_msg: the currently in-flight message
  * @cur_msg_prepared: spi_prepare_message was called for the currently
  *                    in-flight message
+ * @cur_msg_mapped: message has been mapped for DMA
  * @xfer_completion: used by core transfer_one_message()
  * @busy: message pump is busy
  * @running: message pump is running
@@ -299,6 +301,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
+ * @dma_tx: DMA transmit channel
+ * @dma_rx: DMA receive channel
+ * @dummy_rx: dummy receive buffer for full-duplex devices
+ * @dummy_tx: dummy transmit buffer for full-duplex devices
  *
  * Each SPI master controller can communicate with one or more @spi_device
  * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -632,6 +638,7 @@ struct spi_transfer {
  *	addresses for each transfer buffer
  * @complete: called to report transaction completions
  * @context: the argument to complete() when it's called
+ * @frame_length: the total number of bytes in the message
  * @actual_length: the total number of bytes that were transferred in all
  *	successful segments
  * @status: zero for success, else negative errno
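
The spi.h and spi.c hunks only add the "@field: description" lines that scripts/kernel-doc expects; any documented structure whose members lack such a line produces a warning during the documentation build. A minimal illustration of the format, using a made-up structure:

/**
 * struct demo_widget - example of member-level kerneldoc
 * @regs:  base address of the register window
 * @users: number of active openers
 *
 * Every member of a documented structure needs a matching @name line,
 * otherwise "make htmldocs" (scripts/kernel-doc) warns about it.
 */
struct demo_widget {
	void __iomem	*regs;
	int		users;
};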