Commit cfd12db4 authored by Linus Torvalds

Merge tag 'spi-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
 "Quite a busy release for SPI, mainly as a result of Boris Brezillon's
  work on improving the integration with MTD for accelerated SPI flash
  controllers. He's added a new spi_mem interface which works a lot
  better with general hardware and converted the users over to it. As a
  result of this work we've got some MTD changes in here as well.

  Other highlights include:

   - Lots of spring cleaning for the s3c64xx driver.

   - Removal of the bcm53xx driver; the hardware is also supported by the
     mspi driver, but SoC naming had caused people to miss the duplication.

   - Conversion of the pxa2xx driver to use the standard message
     processing loop rather than open coding.

   - A bunch of improvements to the runtime PM of the OMAP McSPI driver"
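For context, the spi_mem interface mentioned above lets a SPI memory driver describe a
whole flash command (opcode, address, dummy and data phases) as one spi_mem_op and hand
it to the controller, instead of open-coding spi_message/spi_transfer chains. Below is a
minimal sketch of a single-wire read using the helpers from include/linux/spi/spi-mem.h;
example_read() and its arguments are illustrative only, not code from this merge:

    #include <linux/spi/spi-mem.h>

    /* Sketch: issue a 0x03 (READ) of len bytes at offs over single-wire SPI.
     * mem is the struct spi_mem * handed to a spi_mem driver's probe(). */
    static int example_read(struct spi_mem *mem, loff_t offs, size_t len, void *buf)
    {
    	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
    					  SPI_MEM_OP_ADDR(3, offs, 1),
    					  SPI_MEM_OP_NO_DUMMY,
    					  SPI_MEM_OP_DATA_IN(len, buf, 1));
    	int ret;

    	/* Let the controller clamp the data size to what it can do in one go */
    	ret = spi_mem_adjust_op_size(mem, &op);
    	if (ret)
    		return ret;

    	return spi_mem_exec_op(mem, &op);
    }

Callers are expected to loop on any remaining length, since spi_mem_adjust_op_size() may
shrink op.data.nbytes to the controller's limit.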

* tag 'spi-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (47 commits)
  spi: Fix typo on SPI_MEM help text
  spi: sh-msiof: Fix setting SIRMDR1.SYNCAC to match SITMDR1.SYNCAC
  mtd: devices: m25p80: Use spi_mem_set_drvdata() instead of spi_set_drvdata()
  spi: omap2-mcspi: Remove unnecessary pm_runtime_force_suspend()
  spi: Add missing pm_runtime_put_noidle() after failed get
  spi: ti-qspi: Make sure res_mmap != NULL before dereferencing it
  spi: spi-s3c64xx: Fix system resume support
  spi: bcm-qspi: Fix build failure caused by spi_flash_read() API removal
  spi: Get rid of the spi_flash_read() API
  mtd: spi-nor: Use the spi_mem_xx() API
  spi: ti-qspi: Implement the spi_mem interface
  spi: bcm-qspi: Implement the spi_mem interface
  spi: Make support for regular transfers optional when ->mem_ops != NULL
  spi: Extend the core to ease integration of SPI memory controllers
  spi: remove forgotten CONFIG_SPI_BCM53XX
  spi: remove the older/duplicated bcm53xx driver
  spi: pxa2xx: check clk_prepare_enable() return value
  spi: lpspi: Switch to SPDX identifier
  spi: mxs: Switch to SPDX identifier
  spi: imx: Switch to SPDX identifier
  ...
parents 910470e0 16c10b3b
@@ -81,6 +81,7 @@ config MTD_DATAFLASH_OTP
 config MTD_M25P80
 	tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
 	depends on SPI_MASTER && MTD_SPI_NOR
+	select SPI_MEM
 	help
 	  This enables access to most modern SPI flash chips, used for
 	  program and data storage. Series supported include Atmel AT26DF,
......
@@ -47,6 +47,13 @@ config SPI_MASTER
 
 if SPI_MASTER
 
+config SPI_MEM
+	bool "SPI memory extension"
+	help
+	  Enable this option if you want to enable the SPI memory extension.
+	  This extension is meant to simplify interaction with SPI memories
+	  by providing a high-level interface to send memory-like commands.
+
 comment "SPI Master Controller Drivers"
 
 config SPI_ALTERA
@@ -71,7 +78,6 @@ config SPI_ARMADA_3700
 
 config SPI_ATMEL
 	tristate "Atmel SPI Controller"
-	depends on HAS_DMA
 	depends on ARCH_AT91 || COMPILE_TEST
 	help
 	  This selects a driver for the Atmel SPI Controller, present on
@@ -115,14 +121,6 @@ config SPI_BCM2835AUX
 	  "universal SPI master", and the regular SPI controller.
 	  This driver is for the universal/auxiliary SPI controller.
 
-config SPI_BCM53XX
-	tristate "Broadcom BCM53xx SPI controller"
-	depends on ARCH_BCM_5301X
-	depends on BCMA_POSSIBLE
-	select BCMA
-	help
-	  Enable support for the SPI controller on Broadcom BCM53xx ARM SoCs.
-
 config SPI_BCM63XX
 	tristate "Broadcom BCM63xx SPI controller"
 	depends on BCM63XX || COMPILE_TEST
@@ -233,7 +231,6 @@ config SPI_EFM32
 
 config SPI_EP93XX
 	tristate "Cirrus Logic EP93xx SPI controller"
-	depends on HAS_DMA
 	depends on ARCH_EP93XX || COMPILE_TEST
 	help
 	  This enables using the Cirrus EP93xx SPI controller in master
@@ -355,7 +352,6 @@ config SPI_FSL_SPI
 config SPI_FSL_DSPI
 	tristate "Freescale DSPI controller"
 	select REGMAP_MMIO
-	depends on HAS_DMA
 	depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
 	help
 	  This enables support for the Freescale DSPI controller in master
@@ -431,7 +427,6 @@ config SPI_OMAP_UWIRE
 
 config SPI_OMAP24XX
 	tristate "McSPI driver for OMAP"
-	depends on HAS_DMA
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	select SG_SPLIT
 	help
@@ -440,7 +435,6 @@ config SPI_OMAP24XX
 
 config SPI_TI_QSPI
 	tristate "DRA7xxx QSPI controller support"
-	depends on HAS_DMA
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	help
 	  QSPI master controller for DRA7xxx used for flash devices.
@@ -469,7 +463,6 @@ config SPI_PIC32
 config SPI_PIC32_SQI
 	tristate "Microchip PIC32 Quad SPI driver"
 	depends on MACH_PIC32 || COMPILE_TEST
-	depends on HAS_DMA
 	help
 	  SPI driver for PIC32 Quad SPI controller.
@@ -572,7 +565,7 @@ config SPI_SC18IS602
 
 config SPI_SH_MSIOF
 	tristate "SuperH MSIOF SPI controller"
-	depends on HAVE_CLK && HAS_DMA
+	depends on HAVE_CLK
 	depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
 	help
 	  SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -650,7 +643,7 @@ config SPI_MXS
 
 config SPI_TEGRA114
 	tristate "NVIDIA Tegra114 SPI Controller"
 	depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
-	depends on RESET_CONTROLLER && HAS_DMA
+	depends on RESET_CONTROLLER
 	help
 	  SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller
 	  is different than the older SoCs SPI controller and also register interface
@@ -668,7 +661,7 @@ config SPI_TEGRA20_SFLASH
 
 config SPI_TEGRA20_SLINK
 	tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
 	depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
-	depends on RESET_CONTROLLER && HAS_DMA
+	depends on RESET_CONTROLLER
 	help
 	  SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
......
@@ -8,6 +8,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
 # small core, mostly translating board-specific
 # config declarations into driver model code
 obj-$(CONFIG_SPI_MASTER)		+= spi.o
+obj-$(CONFIG_SPI_MEM)			+= spi-mem.o
 obj-$(CONFIG_SPI_SPIDEV)		+= spidev.o
 obj-$(CONFIG_SPI_LOOPBACK_TEST)		+= spi-loopback-test.o
@@ -20,7 +21,6 @@ obj-$(CONFIG_SPI_AU1550)		+= spi-au1550.o
 obj-$(CONFIG_SPI_AXI_SPI_ENGINE)	+= spi-axi-spi-engine.o
 obj-$(CONFIG_SPI_BCM2835)		+= spi-bcm2835.o
 obj-$(CONFIG_SPI_BCM2835AUX)		+= spi-bcm2835aux.o
-obj-$(CONFIG_SPI_BCM53XX)		+= spi-bcm53xx.o
 obj-$(CONFIG_SPI_BCM63XX)		+= spi-bcm63xx.o
 obj-$(CONFIG_SPI_BCM63XX_HSSPI)		+= spi-bcm63xx-hsspi.o
 obj-$(CONFIG_SPI_BCM_QSPI)		+= spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.o
......
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*
* Helpers needed by the spi or spi-mem logic. Should not be used outside of
* spi-mem.c and spi.c.
*/
#ifndef __LINUX_SPI_INTERNALS_H
#define __LINUX_SPI_INTERNALS_H
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
void spi_flush_queue(struct spi_controller *ctrl);
#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir);
void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir);
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
{
return -EINVAL;
}
static inline void spi_unmap_buf(struct spi_controller *ctlr,
struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir)
{
}
#endif /* CONFIG_HAS_DMA */
#endif /* __LINUX_SPI_INTERNALS_H */
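The header above is part of the plumbing for the new spi-mem core: spi.c keeps its DMA
mapping helpers private but shares them with spi-mem.c. On the controller side, a driver
opts into the accelerated path by filling a spi_controller_mem_ops (see "spi: Make
support for regular transfers optional when ->mem_ops != NULL" in the list above). A
rough sketch of that wiring follows; every foo_* name is a placeholder, not code from
this merge:

    #include <linux/platform_device.h>
    #include <linux/spi/spi-mem.h>

    static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
    {
    	/* program the cmd/addr/dummy/data phases into the controller here */
    	return 0;
    }

    static bool foo_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
    {
    	/* start from the generic buswidth checks, then add hardware limits */
    	return spi_mem_default_supports_op(mem, op);
    }

    static const struct spi_controller_mem_ops foo_mem_ops = {
    	.supports_op = foo_supports_op,
    	.exec_op = foo_exec_op,
    };

    static int foo_probe(struct platform_device *pdev)
    {
    	struct spi_controller *ctlr = spi_alloc_master(&pdev->dev, 0);

    	if (!ctlr)
    		return -ENOMEM;

    	/* with mem_ops set, a regular transfer_one path becomes optional */
    	ctlr->mem_ops = &foo_mem_ops;
    	return devm_spi_register_controller(&pdev->dev, ctlr);
    }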
/*
* Copyright (C) 2014-2016 Rafał Miłecki <rafal@milecki.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/bcma/bcma.h>
#include <linux/spi/spi.h>
#include "spi-bcm53xx.h"
#define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */
#define BCM53XXSPI_FLASH_WINDOW SZ_32M
/* The longest observed required wait was 19 ms */
#define BCM53XXSPI_SPE_TIMEOUT_MS 80
struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
bool bspi; /* Boot SPI mode with memory mapping */
};
static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset)
{
return bcma_read32(b53spi->core, offset);
}
static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset,
u32 value)
{
bcma_write32(b53spi->core, offset, value);
}
static void bcm53xxspi_disable_bspi(struct bcm53xxspi *b53spi)
{
struct device *dev = &b53spi->core->dev;
unsigned long deadline;
u32 tmp;
if (!b53spi->bspi)
return;
tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
if (tmp & 0x1)
return;
deadline = jiffies + usecs_to_jiffies(200);
do {
tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_BUSY_STATUS);
if (!(tmp & 0x1)) {
bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL,
0x1);
ndelay(200);
b53spi->bspi = false;
return;
}
udelay(1);
} while (!time_after_eq(jiffies, deadline));
dev_warn(dev, "Timeout disabling BSPI\n");
}
static void bcm53xxspi_enable_bspi(struct bcm53xxspi *b53spi)
{
u32 tmp;
if (b53spi->bspi)
return;
tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
if (!(tmp & 0x1))
return;
bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL, 0x0);
b53spi->bspi = true;
}
static inline unsigned int bcm53xxspi_calc_timeout(size_t len)
{
/* Do some magic calculation based on length and buad. Add 10% and 1. */
return (len * 9000 / BCM53XXSPI_MAX_SPI_BAUD * 110 / 100) + 1;
}
static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms)
{
unsigned long deadline;
u32 tmp;
/* SPE bit has to be 0 before we read MSPI STATUS */
deadline = jiffies + msecs_to_jiffies(BCM53XXSPI_SPE_TIMEOUT_MS);
do {
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
if (!(tmp & B53SPI_MSPI_SPCR2_SPE))
break;
udelay(5);
} while (!time_after_eq(jiffies, deadline));
if (tmp & B53SPI_MSPI_SPCR2_SPE)
goto spi_timeout;
/* Check status */
deadline = jiffies + msecs_to_jiffies(timeout_ms);
do {
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS);
if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) {
bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
return 0;
}
cpu_relax();
udelay(100);
} while (!time_after_eq(jiffies, deadline));
spi_timeout:
bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
pr_err("Timeout waiting for SPI to be ready!\n");
return -EBUSY;
}
static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
size_t len, bool cont)
{
u32 tmp;
int i;
for (i = 0; i < len; i++) {
/* Transmit Register File MSB */
bcm53xxspi_write(b53spi, B53SPI_MSPI_TXRAM + 4 * (i * 2),
(unsigned int)w_buf[i]);
}
for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
}
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
/* Start SPI transfer */
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
tmp |= B53SPI_MSPI_SPCR2_SPE;
if (cont)
tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
/* Wait for SPI to finish */
bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
size_t len, bool cont)
{
u32 tmp;
int i;
for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
}
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
/* Start SPI transfer */
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
tmp |= B53SPI_MSPI_SPCR2_SPE;
if (cont)
tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
/* Wait for SPI to finish */
bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct bcm53xxspi *b53spi = spi_master_get_devdata(master);
u8 *buf;
size_t left;
bcm53xxspi_disable_bspi(b53spi);
if (t->tx_buf) {
buf = (u8 *)t->tx_buf;
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
bool cont = !spi_transfer_is_last(master, t) ||
left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
buf += to_write;
}
}
if (t->rx_buf) {
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
size_t to_read = min_t(size_t, 16, left);
bool cont = !spi_transfer_is_last(master, t) ||
left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
buf += to_read;
}
}
return 0;
}
static int bcm53xxspi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
struct bcm53xxspi *b53spi = spi_master_get_devdata(spi->master);
int ret = 0;
if (msg->from + msg->len > BCM53XXSPI_FLASH_WINDOW)
return -EINVAL;
bcm53xxspi_enable_bspi(b53spi);
memcpy_fromio(msg->buf, b53spi->mmio_base + msg->from, msg->len);
msg->retlen = msg->len;
return ret;
}
/**************************************************
* BCMA
**************************************************/
static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl);
static int bcm53xxspi_bcma_probe(struct bcma_device *core)
{
struct device *dev = &core->dev;
struct bcm53xxspi *b53spi;
struct spi_master *master;
int err;
if (core->bus->drv_cc.core->id.rev != 42) {
pr_err("SPI on SoC with unsupported ChipCommon rev\n");
return -ENOTSUPP;
}
master = spi_alloc_master(dev, sizeof(*b53spi));
if (!master)
return -ENOMEM;
b53spi = spi_master_get_devdata(master);
b53spi->master = master;
b53spi->core = core;
if (core->addr_s[0])
b53spi->mmio_base = devm_ioremap(dev, core->addr_s[0],
BCM53XXSPI_FLASH_WINDOW);
b53spi->bspi = true;
bcm53xxspi_disable_bspi(b53spi);
master->dev.of_node = dev->of_node;
master->transfer_one = bcm53xxspi_transfer_one;
if (b53spi->mmio_base)
master->spi_flash_read = bcm53xxspi_flash_read;
bcma_set_drvdata(core, b53spi);
err = devm_spi_register_master(dev, master);
if (err) {
spi_master_put(master);
bcma_set_drvdata(core, NULL);
return err;
}
return 0;
}
static struct bcma_driver bcm53xxspi_bcma_driver = {
.name = KBUILD_MODNAME,
.id_table = bcm53xxspi_bcma_tbl,
.probe = bcm53xxspi_bcma_probe,
};
/**************************************************
* Init & exit
**************************************************/
static int __init bcm53xxspi_module_init(void)
{
int err = 0;
err = bcma_driver_register(&bcm53xxspi_bcma_driver);
if (err)
pr_err("Failed to register bcma driver: %d\n", err);
return err;
}
static void __exit bcm53xxspi_module_exit(void)
{
bcma_driver_unregister(&bcm53xxspi_bcma_driver);
}
module_init(bcm53xxspi_module_init);
module_exit(bcm53xxspi_module_exit);
MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver");
MODULE_AUTHOR("Rafał Miłecki <zajec5@gmail.com>");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPI_BCM53XX_H
#define SPI_BCM53XX_H
#define B53SPI_BSPI_REVISION_ID 0x000
#define B53SPI_BSPI_SCRATCH 0x004
#define B53SPI_BSPI_MAST_N_BOOT_CTRL 0x008
#define B53SPI_BSPI_BUSY_STATUS 0x00c
#define B53SPI_BSPI_INTR_STATUS 0x010
#define B53SPI_BSPI_B0_STATUS 0x014
#define B53SPI_BSPI_B0_CTRL 0x018
#define B53SPI_BSPI_B1_STATUS 0x01c
#define B53SPI_BSPI_B1_CTRL 0x020
#define B53SPI_BSPI_STRAP_OVERRIDE_CTRL 0x024
#define B53SPI_BSPI_FLEX_MODE_ENABLE 0x028
#define B53SPI_BSPI_BITS_PER_CYCLE 0x02c
#define B53SPI_BSPI_BITS_PER_PHASE 0x030
#define B53SPI_BSPI_CMD_AND_MODE_BYTE 0x034
#define B53SPI_BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
#define B53SPI_BSPI_BSPI_XOR_VALUE 0x03c
#define B53SPI_BSPI_BSPI_XOR_ENABLE 0x040
#define B53SPI_BSPI_BSPI_PIO_MODE_ENABLE 0x044
#define B53SPI_BSPI_BSPI_PIO_IODIR 0x048
#define B53SPI_BSPI_BSPI_PIO_DATA 0x04c
/* RAF */
#define B53SPI_RAF_START_ADDR 0x100
#define B53SPI_RAF_NUM_WORDS 0x104
#define B53SPI_RAF_CTRL 0x108
#define B53SPI_RAF_FULLNESS 0x10c
#define B53SPI_RAF_WATERMARK 0x110
#define B53SPI_RAF_STATUS 0x114
#define B53SPI_RAF_READ_DATA 0x118
#define B53SPI_RAF_WORD_CNT 0x11c
#define B53SPI_RAF_CURR_ADDR 0x120
/* MSPI */
#define B53SPI_MSPI_SPCR0_LSB 0x200
#define B53SPI_MSPI_SPCR0_MSB 0x204
#define B53SPI_MSPI_SPCR1_LSB 0x208
#define B53SPI_MSPI_SPCR1_MSB 0x20c
#define B53SPI_MSPI_NEWQP 0x210
#define B53SPI_MSPI_ENDQP 0x214
#define B53SPI_MSPI_SPCR2 0x218
#define B53SPI_MSPI_SPCR2_SPE 0x00000040
#define B53SPI_MSPI_SPCR2_CONT_AFTER_CMD 0x00000080
#define B53SPI_MSPI_MSPI_STATUS 0x220
#define B53SPI_MSPI_MSPI_STATUS_SPIF 0x00000001
#define B53SPI_MSPI_CPTQP 0x224
#define B53SPI_MSPI_TXRAM 0x240 /* 32 registers, up to 0x2b8 */
#define B53SPI_MSPI_RXRAM 0x2c0 /* 32 registers, up to 0x33c */
#define B53SPI_MSPI_CDRAM 0x340 /* 16 registers, up to 0x37c */
#define B53SPI_CDRAM_PCS_PCS0 0x00000001
#define B53SPI_CDRAM_PCS_PCS1 0x00000002
#define B53SPI_CDRAM_PCS_PCS2 0x00000004
#define B53SPI_CDRAM_PCS_PCS3 0x00000008
#define B53SPI_CDRAM_PCS_DISABLE_ALL 0x0000000f
#define B53SPI_CDRAM_PCS_DSCK 0x00000010
#define B53SPI_CDRAM_BITSE 0x00000040
#define B53SPI_CDRAM_CONT 0x00000080
#define B53SPI_MSPI_WRITE_LOCK 0x380
#define B53SPI_MSPI_DISABLE_FLUSH_GEN 0x384
/* Interrupt */
#define B53SPI_INTR_RAF_LR_FULLNESS_REACHED 0x3a0
#define B53SPI_INTR_RAF_LR_TRUNCATED 0x3a4
#define B53SPI_INTR_RAF_LR_IMPATIENT 0x3a8
#define B53SPI_INTR_RAF_LR_SESSION_DONE 0x3ac
#define B53SPI_INTR_RAF_LR_OVERREAD 0x3b0
#define B53SPI_INTR_MSPI_DONE 0x3b4
#define B53SPI_INTR_MSPI_HALT_SET_TRANSACTION_DONE 0x3b8
#endif /* SPI_BCM53XX_H */
@@ -352,22 +352,31 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
+	ret = clk_prepare_enable(clk);
+	if (ret)
+		return ret;
+
 	rate = clk_get_rate(clk);
 	if (!rate) {
 		struct clk *pll_clk = devm_clk_get(dev, "pll");
 
-		if (IS_ERR(pll_clk))
-			return PTR_ERR(pll_clk);
+		if (IS_ERR(pll_clk)) {
+			ret = PTR_ERR(pll_clk);
+			goto out_disable_clk;
+		}
+
+		ret = clk_prepare_enable(pll_clk);
+		if (ret)
+			goto out_disable_clk;
 
 		rate = clk_get_rate(pll_clk);
-		if (!rate)
-			return -EINVAL;
+		clk_disable_unprepare(pll_clk);
+		if (!rate) {
+			ret = -EINVAL;
+			goto out_disable_clk;
+		}
 	}
 
-	ret = clk_prepare_enable(clk);
-	if (ret)
-		return ret;
-
 	master = spi_alloc_master(&pdev->dev, sizeof(*bs));
 	if (!master) {
 		ret = -ENOMEM;
......
@@ -694,8 +694,7 @@ static int cdns_spi_remove(struct platform_device *pdev)
  */
 static int __maybe_unused cdns_spi_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = dev_get_drvdata(dev);
 
 	return spi_master_suspend(master);
 }
@@ -710,8 +709,7 @@ static int __maybe_unused cdns_spi_suspend(struct device *dev)
  */
 static int __maybe_unused cdns_spi_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = dev_get_drvdata(dev);
 	struct cdns_spi *xspi = spi_master_get_devdata(master);
 
 	cdns_spi_init_hw(xspi);
......
-/*
- * Freescale i.MX7ULP LPSPI driver
- *
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX7ULP LPSPI driver
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
 
 #include <linux/clk.h>
 #include <linux/completion.h>
......
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 Juergen Beisert
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation
- * 51 Franklin Street, Fifth Floor
- * Boston, MA 02110-1301, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+// Copyright (C) 2008 Juergen Beisert
 
 #include <linux/clk.h>
 #include <linux/completion.h>
......
@@ -574,10 +574,15 @@ static int meson_spicc_probe(struct platform_device *pdev)
 	master->max_speed_hz = rate >> 2;
 
 	ret = devm_spi_register_master(&pdev->dev, master);
-	if (!ret)
-		return 0;
+	if (ret) {
+		dev_err(&pdev->dev, "spi master registration failed\n");
+		goto out_clk;
+	}
 
-	dev_err(&pdev->dev, "spi master registration failed\n");
+	return 0;
+
+out_clk:
+	clk_disable_unprepare(spicc->core);
 
 out_master:
 	spi_master_put(master);
......
@@ -447,7 +447,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
 
 	for (i = 0; i < ms->gpio_cs_count; i++) {
 		gpio_cs = of_get_gpio(op->dev.of_node, i);
-		if (gpio_cs < 0) {
+		if (!gpio_is_valid(gpio_cs)) {
 			dev_err(&op->dev,
 				"could not parse the gpio field in oftree\n");
 			rc = -ENODEV;
......
-/*
- * Freescale MXS SPI master driver
- *
- * Copyright 2012 DENX Software Engineering, GmbH.
- * Copyright 2012 Freescale Semiconductor, Inc.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- *
- * Rework and transition to new API by:
- *   Marek Vasut <marex@denx.de>
- *
- * Based on previous attempt by:
- *   Fabio Estevam <fabio.estevam@freescale.com>
- *
- * Based on code from U-Boot bootloader by:
- *   Marek Vasut <marex@denx.de>
- *
- * Based on spi-stmp.c, which is:
- *   Author: Dmitry Pervushin <dimka@embeddedalley.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale MXS SPI master driver
+//
+// Copyright 2012 DENX Software Engineering, GmbH.
+// Copyright 2012 Freescale Semiconductor, Inc.
+// Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+//
+// Rework and transition to new API by:
+//   Marek Vasut <marex@denx.de>
+//
+// Based on previous attempt by:
+//   Fabio Estevam <fabio.estevam@freescale.com>
+//
+// Based on code from U-Boot bootloader by:
+//   Marek Vasut <marex@denx.de>
+//
+// Based on spi-stmp.c, which is:
+//   Author: Dmitry Pervushin <dimka@embeddedalley.com>
 
 #include <linux/kernel.h>
 #include <linux/ioport.h>
......
@@ -255,6 +255,7 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
 	if (spi->controller_state) {
 		int err = pm_runtime_get_sync(mcspi->dev);
 		if (err < 0) {
+			pm_runtime_put_noidle(mcspi->dev);
 			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
 			return;
 		}
@@ -350,20 +351,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
 	mcspi->fifo_depth = 0;
 }
 
-static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
-{
-	struct spi_master *spi_cntrl = mcspi->master;
-	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
-	struct omap2_mcspi_cs *cs;
-
-	/* McSPI: context restore */
-	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
-	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
-
-	list_for_each_entry(cs, &ctx->cs, node)
-		writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
-}
-
 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
 {
 	unsigned long timeout;
@@ -1065,8 +1052,11 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 	}
 
 	ret = pm_runtime_get_sync(mcspi->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_noidle(mcspi->dev);
 		return ret;
+	}
 
 	ret = omap2_mcspi_setup_transfer(spi, NULL);
 	pm_runtime_mark_last_busy(mcspi->dev);
@@ -1284,8 +1274,11 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
 	int ret = 0;
 
 	ret = pm_runtime_get_sync(mcspi->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_noidle(mcspi->dev);
 		return ret;
+	}
 
 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
@@ -1297,14 +1290,39 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
 	return 0;
 }
 
+/*
+ * When SPI wake up from off-mode, CS is in activate state. If it was in
+ * inactive state when driver was suspend, then force it to inactive state at
+ * wake up.
+ */
 static int omap_mcspi_runtime_resume(struct device *dev)
 {
-	struct omap2_mcspi *mcspi;
-	struct spi_master *master;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+	struct omap2_mcspi_cs *cs;
 
-	master = dev_get_drvdata(dev);
-	mcspi = spi_master_get_devdata(master);
-	omap2_mcspi_restore_ctx(mcspi);
+	/* McSPI: context restore */
+	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+
+	list_for_each_entry(cs, &ctx->cs, node) {
+		/*
+		 * We need to toggle CS state for OMAP take this
+		 * change in account.
+		 */
+		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
+			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
+			writel_relaxed(cs->chconf0,
+				       cs->base + OMAP2_MCSPI_CHCONF0);
+			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+			writel_relaxed(cs->chconf0,
+				       cs->base + OMAP2_MCSPI_CHCONF0);
+		} else {
+			writel_relaxed(cs->chconf0,
+				       cs->base + OMAP2_MCSPI_CHCONF0);
+		}
+	}
 
 	return 0;
 }
@@ -1447,50 +1465,33 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
 MODULE_ALIAS("platform:omap2_mcspi");
 
 #ifdef CONFIG_SUSPEND
-/*
- * When SPI wake up from off-mode, CS is in activate state. If it was in
- * unactive state when driver was suspend, then force it to unactive state at
- * wake up.
- */
-static int omap2_mcspi_resume(struct device *dev)
+static int omap2_mcspi_suspend_noirq(struct device *dev)
 {
-	struct spi_master *master = dev_get_drvdata(dev);
-	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
-	struct omap2_mcspi_cs *cs;
-
-	pm_runtime_get_sync(mcspi->dev);
-	list_for_each_entry(cs, &ctx->cs, node) {
-		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
-			/*
-			 * We need to toggle CS state for OMAP take this
-			 * change in account.
-			 */
-			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
-			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
-			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
-			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
-		}
-	}
-	pm_runtime_mark_last_busy(mcspi->dev);
-	pm_runtime_put_autosuspend(mcspi->dev);
-
-	return pinctrl_pm_select_default_state(dev);
+	return pinctrl_pm_select_sleep_state(dev);
 }
 
-static int omap2_mcspi_suspend(struct device *dev)
+static int omap2_mcspi_resume_noirq(struct device *dev)
 {
-	return pinctrl_pm_select_sleep_state(dev);
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+	int error;
+
+	error = pinctrl_pm_select_default_state(dev);
+	if (error)
+		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+			 __func__, error);
+
+	return 0;
 }
 
 #else
-#define omap2_mcspi_suspend	NULL
-#define omap2_mcspi_resume	NULL
+#define omap2_mcspi_suspend_noirq	NULL
+#define omap2_mcspi_resume_noirq	NULL
 #endif
 
 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
-	.resume = omap2_mcspi_resume,
-	.suspend = omap2_mcspi_suspend,
+	.suspend_noirq = omap2_mcspi_suspend_noirq,
+	.resume_noirq = omap2_mcspi_resume_noirq,
 	.runtime_resume = omap_mcspi_runtime_resume,
 };
......
@@ -51,19 +51,15 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
 		if (!pxa25x_ssp_comp(drv_data))
 			pxa2xx_spi_write(drv_data, SSTO, 0);
 
-		if (!error) {
-			msg->actual_length += drv_data->len;
-			msg->state = pxa2xx_spi_next_transfer(drv_data);
-		} else {
+		if (error) {
 			/* In case we got an error we disable the SSP now */
 			pxa2xx_spi_write(drv_data, SSCR0,
 					 pxa2xx_spi_read(drv_data, SSCR0)
 					 & ~SSCR0_SSE);
 			msg->status = -EIO;
-			msg->state = ERROR_STATE;
 		}
 
-		tasklet_schedule(&drv_data->pump_transfers);
+		spi_finalize_current_transfer(drv_data->master);
 	}
 }
@@ -74,11 +70,11 @@ static void pxa2xx_spi_dma_callback(void *data)
 
 static struct dma_async_tx_descriptor *
 pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
-			   enum dma_transfer_direction dir)
+			   enum dma_transfer_direction dir,
+			   struct spi_transfer *xfer)
 {
 	struct chip_data *chip =
 		spi_get_ctldata(drv_data->master->cur_msg->spi);
-	struct spi_transfer *xfer = drv_data->cur_transfer;
 	enum dma_slave_buswidth width;
 	struct dma_slave_config cfg;
 	struct dma_chan *chan;
@@ -144,12 +140,13 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 	return IRQ_NONE;
 }
 
-int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+			   struct spi_transfer *xfer)
 {
 	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
 	int err;
 
-	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
+	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
 	if (!tx_desc) {
 		dev_err(&drv_data->pdev->dev,
 			"failed to get DMA TX descriptor\n");
@@ -157,7 +154,7 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
 		goto err_tx;
 	}
 
-	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
+	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
 	if (!rx_desc) {
 		dev_err(&drv_data->pdev->dev,
 			"failed to get DMA RX descriptor\n");
@@ -187,6 +184,13 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
 	atomic_set(&drv_data->dma_running, 1);
 }
 
+void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
+{
+	atomic_set(&drv_data->dma_running, 0);
+	dmaengine_terminate_sync(drv_data->master->dma_rx);
+	dmaengine_terminate_sync(drv_data->master->dma_tx);
+}
+
 int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
 {
 	struct pxa2xx_spi_master *pdata = drv_data->master_info;
......
@@ -46,15 +46,10 @@ struct driver_data {
 	u32 clear_sr;
 	u32 mask_sr;
 
-	/* Message Transfer pump */
-	struct tasklet_struct pump_transfers;
-
 	/* DMA engine support */
 	atomic_t dma_running;
 
-	/* Current message transfer state info */
-	struct spi_transfer *cur_transfer;
-	size_t len;
+	/* Current transfer state info */
 	void *tx;
 	void *tx_end;
 	void *rx;
@@ -104,11 +99,6 @@ static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
 	__raw_writel(val, drv_data->ioaddr + reg);
 }
 
-#define START_STATE ((void *)0)
-#define RUNNING_STATE ((void *)1)
-#define DONE_STATE ((void *)2)
-#define ERROR_STATE ((void *)-1)
-
 #define DMA_ALIGNMENT	8
 
 static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
@@ -133,14 +123,15 @@ static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
 }
 
 extern int pxa2xx_spi_flush(struct driver_data *drv_data);
-extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
 
 #define MAX_DMA_LEN		SZ_64K
 #define DEFAULT_DMA_CR1		(SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
 
 extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
-extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
+extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+				  struct spi_transfer *xfer);
 extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_stop(struct driver_data *drv_data);
 extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
 extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
 extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
......
...@@ -28,15 +28,15 @@ ...@@ -28,15 +28,15 @@
#define S3C64XX_SPI_CH_CFG 0x00 #define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04 #define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08 #define S3C64XX_SPI_MODE_CFG 0x08
#define S3C64XX_SPI_SLAVE_SEL 0x0C #define S3C64XX_SPI_SLAVE_SEL 0x0C
#define S3C64XX_SPI_INT_EN 0x10 #define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14 #define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18 #define S3C64XX_SPI_TX_DATA 0x18
#define S3C64XX_SPI_RX_DATA 0x1C #define S3C64XX_SPI_RX_DATA 0x1C
#define S3C64XX_SPI_PACKET_CNT 0x20 #define S3C64XX_SPI_PACKET_CNT 0x20
#define S3C64XX_SPI_PENDING_CLR 0x24 #define S3C64XX_SPI_PENDING_CLR 0x24
#define S3C64XX_SPI_SWAP_CFG 0x28 #define S3C64XX_SPI_SWAP_CFG 0x28
#define S3C64XX_SPI_FB_CLK 0x2C #define S3C64XX_SPI_FB_CLK 0x2C
#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */ #define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
...@@ -77,9 +77,9 @@ ...@@ -77,9 +77,9 @@
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0) #define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5) #define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4) #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3) #define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2) #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1) #define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
...@@ -100,7 +100,7 @@ ...@@ -100,7 +100,7 @@
#define S3C64XX_SPI_SWAP_TX_BIT (1<<1) #define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
#define S3C64XX_SPI_SWAP_TX_EN (1<<0) #define S3C64XX_SPI_SWAP_TX_EN (1<<0)
#define S3C64XX_SPI_FBCLK_MSK (3<<0) #define S3C64XX_SPI_FBCLK_MSK (3<<0)
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id]) #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \ #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
...@@ -156,7 +156,6 @@ struct s3c64xx_spi_port_config { ...@@ -156,7 +156,6 @@ struct s3c64xx_spi_port_config {
* @ioclk: Pointer to the i/o clock between master and slave * @ioclk: Pointer to the i/o clock between master and slave
* @master: Pointer to the SPI Protocol master. * @master: Pointer to the SPI Protocol master.
* @cntrlr_info: Platform specific data for the controller this driver manages. * @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
* @lock: Controller specific lock. * @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status. * @state: Set of FLAGS to indicate status.
* @rx_dmach: Controller's DMA channel for Rx. * @rx_dmach: Controller's DMA channel for Rx.
...@@ -177,7 +176,6 @@ struct s3c64xx_spi_driver_data { ...@@ -177,7 +176,6 @@ struct s3c64xx_spi_driver_data {
struct platform_device *pdev; struct platform_device *pdev;
struct spi_master *master; struct spi_master *master;
struct s3c64xx_spi_info *cntrlr_info; struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
spinlock_t lock; spinlock_t lock;
unsigned long sfr_start; unsigned long sfr_start;
struct completion xfer_completion; struct completion xfer_completion;
...@@ -190,7 +188,7 @@ struct s3c64xx_spi_driver_data { ...@@ -190,7 +188,7 @@ struct s3c64xx_spi_driver_data {
unsigned int port_id; unsigned int port_id;
}; };
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
unsigned long loops; unsigned long loops;
...@@ -350,9 +348,8 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master, ...@@ -350,9 +348,8 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
} }
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi, struct spi_transfer *xfer, int dma_mode)
struct spi_transfer *xfer, int dma_mode)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
u32 modecfg, chcfg; u32 modecfg, chcfg;
...@@ -442,8 +439,8 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, ...@@ -442,8 +439,8 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
return RX_FIFO_LVL(status, sdd); return RX_FIFO_LVL(status, sdd);
} }
static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd, static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer) struct spi_transfer *xfer)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
unsigned long val; unsigned long val;
...@@ -485,8 +482,8 @@ static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd, ...@@ -485,8 +482,8 @@ static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
return 0; return 0;
} }
static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd, static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer) struct spi_transfer *xfer)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
unsigned long val; unsigned long val;
...@@ -505,6 +502,8 @@ static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd, ...@@ -505,6 +502,8 @@ static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
status = readl(regs + S3C64XX_SPI_STATUS); status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val); } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
if (!val)
return -EIO;
/* If it was only Tx */ /* If it was only Tx */
if (!xfer->rx_buf) { if (!xfer->rx_buf) {
...@@ -635,11 +634,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, ...@@ -635,11 +634,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_transfer *xfer) struct spi_transfer *xfer)
{ {
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
const void *tx_buf = NULL;
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
int use_dma = 0;
int status; int status;
u32 speed; u32 speed;
u8 bpw; u8 bpw;
unsigned long flags; unsigned long flags;
int use_dma;
reinit_completion(&sdd->xfer_completion); reinit_completion(&sdd->xfer_completion);
...@@ -654,48 +657,77 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, ...@@ -654,48 +657,77 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
s3c64xx_spi_config(sdd); s3c64xx_spi_config(sdd);
} }
/* Polling method for xfers not bigger than FIFO capacity */ if (!is_polling(sdd) && (xfer->len > fifo_len) &&
use_dma = 0; sdd->rx_dma.ch && sdd->tx_dma.ch) {
if (!is_polling(sdd) &&
(sdd->rx_dma.ch && sdd->tx_dma.ch &&
(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
use_dma = 1; use_dma = 1;
spin_lock_irqsave(&sdd->lock, flags); } else if (is_polling(sdd) && xfer->len > fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
origin_len = xfer->len;
/* Pending only which is to be done */ target_len = xfer->len;
sdd->state &= ~RXBUSY; if (xfer->len > fifo_len)
sdd->state &= ~TXBUSY; xfer->len = fifo_len;
}
enable_datapath(sdd, spi, xfer, use_dma); do {
spin_lock_irqsave(&sdd->lock, flags);
/* Start the signals */ /* Pending only which is to be done */
s3c64xx_spi_set_cs(spi, true); sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
spin_unlock_irqrestore(&sdd->lock, flags); s3c64xx_enable_datapath(sdd, xfer, use_dma);
if (use_dma) /* Start the signals */
status = wait_for_dma(sdd, xfer); s3c64xx_spi_set_cs(spi, true);
else
status = wait_for_pio(sdd, xfer); spin_unlock_irqrestore(&sdd->lock, flags);
if (status) { if (use_dma)
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", status = s3c64xx_wait_for_dma(sdd, xfer);
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, else
(sdd->state & RXBUSY) ? 'f' : 'p', status = s3c64xx_wait_for_pio(sdd, xfer);
(sdd->state & TXBUSY) ? 'f' : 'p',
xfer->len); if (status) {
dev_err(&spi->dev,
if (use_dma) { "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
if (xfer->tx_buf != NULL xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
&& (sdd->state & TXBUSY)) (sdd->state & RXBUSY) ? 'f' : 'p',
dmaengine_terminate_all(sdd->tx_dma.ch); (sdd->state & TXBUSY) ? 'f' : 'p',
if (xfer->rx_buf != NULL xfer->len);
&& (sdd->state & RXBUSY))
dmaengine_terminate_all(sdd->rx_dma.ch); if (use_dma) {
if (xfer->tx_buf && (sdd->state & TXBUSY))
dmaengine_terminate_all(sdd->tx_dma.ch);
if (xfer->rx_buf && (sdd->state & RXBUSY))
dmaengine_terminate_all(sdd->rx_dma.ch);
}
} else {
s3c64xx_flush_fifo(sdd);
} }
} else { if (target_len > 0) {
flush_fifo(sdd); target_len -= xfer->len;
if (xfer->tx_buf)
xfer->tx_buf += xfer->len;
if (xfer->rx_buf)
xfer->rx_buf += xfer->len;
if (target_len > fifo_len)
xfer->len = fifo_len;
else
xfer->len = target_len;
}
} while (target_len > 0);
if (origin_len) {
/* Restore original xfer buffers and length */
xfer->tx_buf = tx_buf;
xfer->rx_buf = rx_buf;
xfer->len = origin_len;
} }
return status; return status;
@@ -891,7 +923,7 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
+static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
 {
 	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
@@ -929,7 +961,7 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
 	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
 	writel(val, regs + S3C64XX_SPI_MODE_CFG);
 
-	flush_fifo(sdd);
+	s3c64xx_flush_fifo(sdd);
 }
 
 #ifdef CONFIG_OF
@@ -1145,7 +1177,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	pm_runtime_get_sync(&pdev->dev);
 
 	/* Setup Deufult Mode */
-	s3c64xx_spi_hwinit(sdd, sdd->port_id);
+	s3c64xx_spi_hwinit(sdd);
 
 	spin_lock_init(&sdd->lock);
 	init_completion(&sdd->xfer_completion);
@@ -1260,8 +1292,6 @@ static int s3c64xx_spi_resume(struct device *dev)
 	if (ret < 0)
 		return ret;
 
-	s3c64xx_spi_hwinit(sdd, sdd->port_id);
-
 	return spi_master_resume(master);
 }
 #endif /* CONFIG_PM_SLEEP */
@@ -1299,6 +1329,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
 	if (ret != 0)
 		goto err_disable_src_clk;
 
+	s3c64xx_spi_hwinit(sdd);
+
 	return 0;
 
 err_disable_src_clk:
@@ -1344,15 +1376,6 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
 	.clk_from_cmu	= true,
 };
 
-static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
-	.fifo_lvl_mask	= { 0x1ff },
-	.rx_lvl_offset	= 15,
-	.tx_st_done	= 25,
-	.high_speed	= true,
-	.clk_from_cmu	= true,
-	.quirks		= S3C64XX_SPI_QUIRK_POLL,
-};
-
 static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
 	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
 	.rx_lvl_offset	= 15,
@@ -1396,9 +1419,6 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
 	{ .compatible = "samsung,exynos4210-spi",
 			.data = (void *)&exynos4_spi_port_config,
 	},
-	{ .compatible = "samsung,exynos5440-spi",
-			.data = (void *)&exynos5440_spi_port_config,
-	},
 	{ .compatible = "samsung,exynos7-spi",
 			.data = (void *)&exynos7_spi_port_config,
 	},
......
@@ -39,7 +39,7 @@ struct sh_msiof_chipdata {
 	u16 tx_fifo_size;
 	u16 rx_fifo_size;
 	u16 master_flags;
-	u16 min_div;
+	u16 min_div_pow;
 };
 
 struct sh_msiof_spi_priv {
@@ -51,7 +51,7 @@ struct sh_msiof_spi_priv {
 	struct completion done;
 	unsigned int tx_fifo_size;
 	unsigned int rx_fifo_size;
-	unsigned int min_div;
+	unsigned int min_div_pow;
 	void *tx_dma_page;
 	void *rx_dma_page;
 	dma_addr_t tx_dma_addr;
@@ -249,43 +249,46 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static struct {
-	unsigned short div;
-	unsigned short brdv;
-} const sh_msiof_spi_div_table[] = {
-	{ 1,	SCR_BRDV_DIV_1 },
-	{ 2,	SCR_BRDV_DIV_2 },
-	{ 4,	SCR_BRDV_DIV_4 },
-	{ 8,	SCR_BRDV_DIV_8 },
-	{ 16,	SCR_BRDV_DIV_16 },
-	{ 32,	SCR_BRDV_DIV_32 },
+static const u32 sh_msiof_spi_div_array[] = {
+	SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
+	SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
 };
 
 static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 				      unsigned long parent_rate, u32 spi_hz)
 {
-	unsigned long div = 1024;
+	unsigned long div;
 	u32 brps, scr;
-	size_t k;
+	unsigned int div_pow = p->min_div_pow;
 
-	if (!WARN_ON(!spi_hz || !parent_rate))
-		div = DIV_ROUND_UP(parent_rate, spi_hz);
-
-	div = max_t(unsigned long, div, p->min_div);
-
-	for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
-		brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
-		/* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
-		if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
-			continue;
-		if (brps <= 32) /* max of brdv is 32 */
-			break;
-	}
+	if (!spi_hz || !parent_rate) {
+		WARN(1, "Invalid clock rate parameters %lu and %u\n",
+		     parent_rate, spi_hz);
+		return;
+	}
 
-	k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
-	brps = min_t(int, brps, 32);
+	div = DIV_ROUND_UP(parent_rate, spi_hz);
+	if (div <= 1024) {
+		/* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+		if (!div_pow && div <= 32 && div > 2)
+			div_pow = 1;
+
+		if (div_pow)
+			brps = (div + 1) >> div_pow;
+		else
+			brps = div;
+
+		for (; brps > 32; div_pow++)
+			brps = (brps + 1) >> 1;
+	} else {
+		/* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
+		dev_err(&p->pdev->dev,
+			"Requested SPI transfer rate %d is too low\n", spi_hz);
+		div_pow = 5;
+		brps = 32;
+	}
 
-	scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+	scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
 	sh_msiof_write(p, TSCR, scr);
 	if (!(p->master->flags & SPI_MASTER_MUST_TX))
 		sh_msiof_write(p, RSCR, scr);
...@@ -564,14 +567,16 @@ static int sh_msiof_spi_setup(struct spi_device *spi) ...@@ -564,14 +567,16 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
/* Configure native chip select mode/polarity early */ /* Configure native chip select mode/polarity early */
clr = MDR1_SYNCMD_MASK; clr = MDR1_SYNCMD_MASK;
set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI; set = MDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH) if (spi->mode & SPI_CS_HIGH)
clr |= BIT(MDR1_SYNCAC_SHIFT); clr |= BIT(MDR1_SYNCAC_SHIFT);
else else
set |= BIT(MDR1_SYNCAC_SHIFT); set |= BIT(MDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev); pm_runtime_get_sync(&p->pdev->dev);
tmp = sh_msiof_read(p, TMDR1) & ~clr; tmp = sh_msiof_read(p, TMDR1) & ~clr;
sh_msiof_write(p, TMDR1, tmp | set); sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
tmp = sh_msiof_read(p, RMDR1) & ~clr;
sh_msiof_write(p, RMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev); pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true; p->native_cs_inited = true;
...@@ -1041,21 +1046,21 @@ static const struct sh_msiof_chipdata sh_data = { ...@@ -1041,21 +1046,21 @@ static const struct sh_msiof_chipdata sh_data = {
.tx_fifo_size = 64, .tx_fifo_size = 64,
.rx_fifo_size = 64, .rx_fifo_size = 64,
.master_flags = 0, .master_flags = 0,
.min_div = 1, .min_div_pow = 0,
}; };
static const struct sh_msiof_chipdata rcar_gen2_data = { static const struct sh_msiof_chipdata rcar_gen2_data = {
.tx_fifo_size = 64, .tx_fifo_size = 64,
.rx_fifo_size = 64, .rx_fifo_size = 64,
.master_flags = SPI_MASTER_MUST_TX, .master_flags = SPI_MASTER_MUST_TX,
.min_div = 1, .min_div_pow = 0,
}; };
static const struct sh_msiof_chipdata rcar_gen3_data = { static const struct sh_msiof_chipdata rcar_gen3_data = {
.tx_fifo_size = 64, .tx_fifo_size = 64,
.rx_fifo_size = 64, .rx_fifo_size = 64,
.master_flags = SPI_MASTER_MUST_TX, .master_flags = SPI_MASTER_MUST_TX,
.min_div = 2, .min_div_pow = 1,
}; };
static const struct of_device_id sh_msiof_match[] = { static const struct of_device_id sh_msiof_match[] = {
...@@ -1319,7 +1324,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ...@@ -1319,7 +1324,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p); platform_set_drvdata(pdev, p);
p->master = master; p->master = master;
p->info = info; p->info = info;
p->min_div = chipdata->min_div; p->min_div_pow = chipdata->min_div_pow;
init_completion(&p->done); init_completion(&p->done);
......
...@@ -1129,7 +1129,7 @@ static int stm32_spi_probe(struct platform_device *pdev) ...@@ -1129,7 +1129,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (!spi->clk_rate) { if (!spi->clk_rate) {
dev_err(&pdev->dev, "clk rate = 0\n"); dev_err(&pdev->dev, "clk rate = 0\n");
ret = -EINVAL; ret = -EINVAL;
goto err_master_put; goto err_clk_disable;
} }
spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
struct ti_qspi_regs { struct ti_qspi_regs {
u32 clkctrl; u32 clkctrl;
...@@ -50,6 +51,7 @@ struct ti_qspi { ...@@ -50,6 +51,7 @@ struct ti_qspi {
struct spi_master *master; struct spi_master *master;
void __iomem *base; void __iomem *base;
void __iomem *mmap_base; void __iomem *mmap_base;
size_t mmap_size;
struct regmap *ctrl_base; struct regmap *ctrl_base;
unsigned int ctrl_reg; unsigned int ctrl_reg;
struct clk *fclk; struct clk *fclk;
...@@ -434,12 +436,10 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst, ...@@ -434,12 +436,10 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
return 0; return 0;
} }
static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
struct spi_flash_read_message *msg) void *to, size_t readsize)
{ {
size_t readsize = msg->len; dma_addr_t dma_src = qspi->mmap_phys_base + offs;
void *to = msg->buf;
dma_addr_t dma_src = qspi->mmap_phys_base + msg->from;
int ret = 0; int ret = 0;
/* /*
...@@ -507,13 +507,14 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) ...@@ -507,13 +507,14 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
qspi->mmap_enabled = false; qspi->mmap_enabled = false;
} }
static void ti_qspi_setup_mmap_read(struct spi_device *spi, static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
struct spi_flash_read_message *msg) u8 data_nbits, u8 addr_width,
u8 dummy_bytes)
{ {
struct ti_qspi *qspi = spi_master_get_devdata(spi->master); struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
u32 memval = msg->read_opcode; u32 memval = opcode;
switch (msg->data_nbits) { switch (data_nbits) {
case SPI_NBITS_QUAD: case SPI_NBITS_QUAD:
memval |= QSPI_SETUP_RD_QUAD; memval |= QSPI_SETUP_RD_QUAD;
break; break;
...@@ -524,48 +525,64 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, ...@@ -524,48 +525,64 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi,
memval |= QSPI_SETUP_RD_NORMAL; memval |= QSPI_SETUP_RD_NORMAL;
break; break;
} }
memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT | memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT); dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
ti_qspi_write(qspi, memval, ti_qspi_write(qspi, memval,
QSPI_SPI_SETUP_REG(spi->chip_select)); QSPI_SPI_SETUP_REG(spi->chip_select));
} }
static bool ti_qspi_spi_flash_can_dma(struct spi_device *spi, static int ti_qspi_exec_mem_op(struct spi_mem *mem,
struct spi_flash_read_message *msg) const struct spi_mem_op *op)
{ {
return virt_addr_valid(msg->buf); struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
} u32 from = 0;
static int ti_qspi_spi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
int ret = 0; int ret = 0;
/* Only optimize read path. */
if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
!op->addr.nbytes || op->addr.nbytes > 4)
return -ENOTSUPP;
/* Address exceeds MMIO window size, fall back to regular mode. */
from = op->addr.val;
if (from + op->data.nbytes > qspi->mmap_size)
return -ENOTSUPP;
mutex_lock(&qspi->list_lock); mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled) if (!qspi->mmap_enabled)
ti_qspi_enable_memory_map(spi); ti_qspi_enable_memory_map(mem->spi);
ti_qspi_setup_mmap_read(spi, msg); ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
if (qspi->rx_chan) { if (qspi->rx_chan) {
if (msg->cur_msg_mapped) struct sg_table sgt;
ret = ti_qspi_dma_xfer_sg(qspi, msg->rx_sg, msg->from);
else if (virt_addr_valid(op->data.buf.in) &&
ret = ti_qspi_dma_bounce_buffer(qspi, msg); !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
if (ret) &sgt)) {
goto err_unlock; ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
spi_controller_dma_unmap_mem_op_data(mem->spi->master,
op, &sgt);
} else {
ret = ti_qspi_dma_bounce_buffer(qspi, from,
op->data.buf.in,
op->data.nbytes);
}
} else { } else {
memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len); memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
op->data.nbytes);
} }
msg->retlen = msg->len;
err_unlock:
mutex_unlock(&qspi->list_lock); mutex_unlock(&qspi->list_lock);
return ret; return ret;
} }
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
};
static int ti_qspi_start_transfer_one(struct spi_master *master, static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_message *m) struct spi_message *m)
{ {
...@@ -672,7 +689,7 @@ static int ti_qspi_probe(struct platform_device *pdev) ...@@ -672,7 +689,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node; master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8); SPI_BPW_MASK(8);
master->spi_flash_read = ti_qspi_spi_flash_read; master->mem_ops = &ti_qspi_mem_ops;
if (!of_property_read_u32(np, "num-cs", &num_cs)) if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs; master->num_chipselect = num_cs;
...@@ -702,6 +719,9 @@ static int ti_qspi_probe(struct platform_device *pdev) ...@@ -702,6 +719,9 @@ static int ti_qspi_probe(struct platform_device *pdev)
} }
} }
if (res_mmap)
qspi->mmap_size = resource_size(res_mmap);
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq < 0) { if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n"); dev_err(&pdev->dev, "no irq resource?\n");
...@@ -770,7 +790,6 @@ static int ti_qspi_probe(struct platform_device *pdev) ...@@ -770,7 +790,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
dma_release_channel(qspi->rx_chan); dma_release_channel(qspi->rx_chan);
goto no_dma; goto no_dma;
} }
master->spi_flash_can_dma = ti_qspi_spi_flash_can_dma;
master->dma_rx = qspi->rx_chan; master->dma_rx = qspi->rx_chan;
init_completion(&qspi->transfer_complete); init_completion(&qspi->transfer_complete);
if (res_mmap) if (res_mmap)
...@@ -784,7 +803,7 @@ static int ti_qspi_probe(struct platform_device *pdev) ...@@ -784,7 +803,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
"mmap failed with error %ld using PIO mode\n", "mmap failed with error %ld using PIO mode\n",
PTR_ERR(qspi->mmap_base)); PTR_ERR(qspi->mmap_base));
qspi->mmap_base = NULL; qspi->mmap_base = NULL;
master->spi_flash_read = NULL; master->mem_ops = NULL;
} }
} }
qspi->mmap_enabled = false; qspi->mmap_enabled = false;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
...@@ -135,6 +136,7 @@ ...@@ -135,6 +136,7 @@
#define GQSPI_DMA_UNALIGN 0x3 #define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */ #define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA}; enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
/** /**
...@@ -356,21 +358,9 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi, ...@@ -356,21 +358,9 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
static int zynqmp_prepare_transfer_hardware(struct spi_master *master) static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
{ {
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
int ret;
ret = clk_enable(xqspi->refclk);
if (ret)
return ret;
ret = clk_enable(xqspi->pclk);
if (ret)
goto clk_err;
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK); zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
return 0; return 0;
clk_err:
clk_disable(xqspi->refclk);
return ret;
} }
/** /**
...@@ -387,8 +377,6 @@ static int zynqmp_unprepare_transfer_hardware(struct spi_master *master) ...@@ -387,8 +377,6 @@ static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
return 0; return 0;
} }
...@@ -918,8 +906,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master, ...@@ -918,8 +906,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
*/ */
static int __maybe_unused zynqmp_qspi_suspend(struct device *dev) static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = dev_get_drvdata(dev);
struct spi_master *master = platform_get_drvdata(pdev);
spi_master_suspend(master); spi_master_suspend(master);
...@@ -939,8 +926,7 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev) ...@@ -939,8 +926,7 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
*/ */
static int __maybe_unused zynqmp_qspi_resume(struct device *dev) static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = dev_get_drvdata(dev);
struct spi_master *master = platform_get_drvdata(pdev);
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master); struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
int ret = 0; int ret = 0;
...@@ -959,11 +945,67 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev) ...@@ -959,11 +945,67 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
spi_master_resume(master); spi_master_resume(master);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
return 0; return 0;
} }
static SIMPLE_DEV_PM_OPS(zynqmp_qspi_dev_pm_ops, zynqmp_qspi_suspend, /**
zynqmp_qspi_resume); * zynqmp_runtime_suspend - Runtime suspend method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function disables the clocks
*
* Return: Always 0
*/
static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
return 0;
}
/**
* zynqmp_runtime_resume - Runtime resume method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function enables the clocks
*
* Return: 0 on success and error value on error
*/
static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
int ret;
ret = clk_enable(xqspi->pclk);
if (ret) {
dev_err(dev, "Cannot enable APB clock.\n");
return ret;
}
ret = clk_enable(xqspi->refclk);
if (ret) {
dev_err(dev, "Cannot enable device clock.\n");
clk_disable(xqspi->pclk);
return ret;
}
return 0;
}
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
zynqmp_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
/** /**
* zynqmp_qspi_probe: Probe method for the QSPI driver * zynqmp_qspi_probe: Probe method for the QSPI driver
...@@ -1023,9 +1065,15 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) ...@@ -1023,9 +1065,15 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_pclk; goto clk_dis_pclk;
} }
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* QSPI controller initializations */ /* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi); zynqmp_qspi_init_hw(xqspi);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
xqspi->irq = platform_get_irq(pdev, 0); xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) { if (xqspi->irq <= 0) {
ret = -ENXIO; ret = -ENXIO;
...@@ -1063,6 +1111,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) ...@@ -1063,6 +1111,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
return 0; return 0;
clk_dis_all: clk_dis_all:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(xqspi->refclk); clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk: clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk); clk_disable_unprepare(xqspi->pclk);
...@@ -1090,6 +1140,8 @@ static int zynqmp_qspi_remove(struct platform_device *pdev) ...@@ -1090,6 +1140,8 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
clk_disable_unprepare(xqspi->refclk); clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk); clk_disable_unprepare(xqspi->pclk);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
spi_unregister_master(master); spi_unregister_master(master);
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/mod_devicetable.h> #include <linux/mod_devicetable.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h> #include <linux/of_gpio.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/pm_domain.h> #include <linux/pm_domain.h>
...@@ -46,6 +47,8 @@ ...@@ -46,6 +47,8 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/spi.h> #include <trace/events/spi.h>
#include "internals.h"
static DEFINE_IDR(spi_master_idr); static DEFINE_IDR(spi_master_idr);
static void spidev_release(struct device *dev) static void spidev_release(struct device *dev)
...@@ -740,9 +743,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable) ...@@ -740,9 +743,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
} }
#ifdef CONFIG_HAS_DMA #ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len, struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
const bool vmalloced_buf = is_vmalloc_addr(buf); const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev); unsigned int max_seg_size = dma_get_max_seg_size(dev);
...@@ -821,8 +824,8 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev, ...@@ -821,8 +824,8 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
return 0; return 0;
} }
static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir) struct sg_table *sgt, enum dma_data_direction dir)
{ {
if (sgt->orig_nents) { if (sgt->orig_nents) {
dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
...@@ -907,19 +910,6 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) ...@@ -907,19 +910,6 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
return 0; return 0;
} }
#else /* !CONFIG_HAS_DMA */ #else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
{
return -EINVAL;
}
static inline void spi_unmap_buf(struct spi_controller *ctlr,
struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir)
{
}
static inline int __spi_map_msg(struct spi_controller *ctlr, static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg) struct spi_message *msg)
{ {
...@@ -1222,6 +1212,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) ...@@ -1222,6 +1212,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
if (!was_busy && ctlr->auto_runtime_pm) { if (!was_busy && ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent); ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) { if (ret < 0) {
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n", dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret); ret);
mutex_unlock(&ctlr->io_mutex); mutex_unlock(&ctlr->io_mutex);
...@@ -1533,6 +1524,22 @@ static int spi_controller_initialize_queue(struct spi_controller *ctlr) ...@@ -1533,6 +1524,22 @@ static int spi_controller_initialize_queue(struct spi_controller *ctlr)
return ret; return ret;
} }
/**
* spi_flush_queue - Send all pending messages in the queue from the caller's
* context
* @ctlr: controller to process queue for
*
* This should be used when one wants to ensure all pending messages have been
* sent before doing something. Is used by the spi-mem code to make sure SPI
* memory operations do not preempt regular SPI transfers that have been queued
* before the spi-mem operation.
*/
void spi_flush_queue(struct spi_controller *ctlr)
{
if (ctlr->transfer == spi_queued_transfer)
__spi_pump_messages(ctlr, false);
}
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF) #if defined(CONFIG_OF)
...@@ -2063,6 +2070,26 @@ static int of_spi_register_master(struct spi_controller *ctlr) ...@@ -2063,6 +2070,26 @@ static int of_spi_register_master(struct spi_controller *ctlr)
} }
#endif #endif
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
/*
* The controller may implement only the high-level SPI-memory-like
* operations if it does not support regular SPI transfers, and this is
* a valid use case.
* If ->mem_ops is NULL, we request that at least one of the
* ->transfer_xxx() methods be implemented.
*/
if (ctlr->mem_ops) {
if (!ctlr->mem_ops->exec_op)
return -EINVAL;
} else if (!ctlr->transfer && !ctlr->transfer_one &&
!ctlr->transfer_one_message) {
return -EINVAL;
}
return 0;
}
/** /**
* spi_register_controller - register SPI master or slave controller * spi_register_controller - register SPI master or slave controller
* @ctlr: initialized master, originally from spi_alloc_master() or * @ctlr: initialized master, originally from spi_alloc_master() or
...@@ -2096,6 +2123,14 @@ int spi_register_controller(struct spi_controller *ctlr) ...@@ -2096,6 +2123,14 @@ int spi_register_controller(struct spi_controller *ctlr)
if (!dev) if (!dev)
return -ENODEV; return -ENODEV;
/*
* Make sure all necessary hooks are implemented before registering
* the SPI controller.
*/
status = spi_controller_check_ops(ctlr);
if (status)
return status;
if (!spi_controller_is_slave(ctlr)) { if (!spi_controller_is_slave(ctlr)) {
status = of_spi_register_master(ctlr); status = of_spi_register_master(ctlr);
if (status) if (status)
...@@ -2161,10 +2196,14 @@ int spi_register_controller(struct spi_controller *ctlr) ...@@ -2161,10 +2196,14 @@ int spi_register_controller(struct spi_controller *ctlr)
spi_controller_is_slave(ctlr) ? "slave" : "master", spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev)); dev_name(&ctlr->dev));
/* If we're using a queued driver, start the queue */ /*
if (ctlr->transfer) * If we're using a queued driver, start the queue. Note that we don't
* need the queueing logic if the driver is only supporting high-level
* memory operations.
*/
if (ctlr->transfer) {
dev_info(dev, "controller is unqueued, this is deprecated\n"); dev_info(dev, "controller is unqueued, this is deprecated\n");
else { } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
status = spi_controller_initialize_queue(ctlr); status = spi_controller_initialize_queue(ctlr);
if (status) { if (status) {
device_del(&ctlr->dev); device_del(&ctlr->dev);
...@@ -2894,6 +2933,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) ...@@ -2894,6 +2933,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
{ {
struct spi_controller *ctlr = spi->controller; struct spi_controller *ctlr = spi->controller;
/*
* Some controllers do not support doing regular SPI transfers. Return
* ENOTSUPP when this is the case.
*/
if (!ctlr->transfer)
return -ENOTSUPP;
message->spi = spi; message->spi = spi;
SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
...@@ -3010,63 +3056,6 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message) ...@@ -3010,63 +3056,6 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
} }
EXPORT_SYMBOL_GPL(spi_async_locked); EXPORT_SYMBOL_GPL(spi_async_locked);
int spi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
struct spi_controller *master = spi->controller;
struct device *rx_dev = NULL;
int ret;
if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
msg->addr_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
return -EINVAL;
if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
msg->addr_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_TX_QUAD))
return -EINVAL;
if (msg->data_nbits == SPI_NBITS_DUAL &&
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
return -EINVAL;
if (msg->data_nbits == SPI_NBITS_QUAD &&
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
if (master->auto_runtime_pm) {
ret = pm_runtime_get_sync(master->dev.parent);
if (ret < 0) {
dev_err(&master->dev, "Failed to power device: %d\n",
ret);
return ret;
}
}
mutex_lock(&master->bus_lock_mutex);
mutex_lock(&master->io_mutex);
if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
rx_dev = master->dma_rx->device->dev;
ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
msg->buf, msg->len,
DMA_FROM_DEVICE);
if (!ret)
msg->cur_msg_mapped = true;
}
ret = master->spi_flash_read(spi, msg);
if (msg->cur_msg_mapped)
spi_unmap_buf(master, rx_dev, &msg->rx_sg,
DMA_FROM_DEVICE);
mutex_unlock(&master->io_mutex);
mutex_unlock(&master->bus_lock_mutex);
if (master->auto_runtime_pm)
pm_runtime_put(master->dev.parent);
return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
/* Utility methods for SPI protocol drivers, layered on /* Utility methods for SPI protocol drivers, layered on
......
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __LINUX_SPI_MEM_H
#define __LINUX_SPI_MEM_H
#include <linux/spi/spi.h>
#define SPI_MEM_OP_CMD(__opcode, __buswidth) \
{ \
.buswidth = __buswidth, \
.opcode = __opcode, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
{ \
.nbytes = __nbytes, \
.val = __val, \
.buswidth = __buswidth, \
}
#define SPI_MEM_OP_NO_ADDR { }
#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \
{ \
.nbytes = __nbytes, \
.buswidth = __buswidth, \
}
#define SPI_MEM_OP_NO_DUMMY { }
#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
{ \
.dir = SPI_MEM_DATA_IN, \
.nbytes = __nbytes, \
.buf.in = __buf, \
.buswidth = __buswidth, \
}
#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
{ \
.dir = SPI_MEM_DATA_OUT, \
.nbytes = __nbytes, \
.buf.out = __buf, \
.buswidth = __buswidth, \
}
#define SPI_MEM_OP_NO_DATA { }
/**
* enum spi_mem_data_dir - describes the direction of a SPI memory data
* transfer from the controller perspective
* @SPI_MEM_DATA_IN: data coming from the SPI memory
* @SPI_MEM_DATA_OUT: data sent to the SPI memory
*/
enum spi_mem_data_dir {
SPI_MEM_DATA_IN,
SPI_MEM_DATA_OUT,
};
/**
* struct spi_mem_op - describes a SPI memory operation
* @cmd.buswidth: number of IO lines used to transmit the command
* @cmd.opcode: operation opcode
* @addr.nbytes: number of address bytes to send. Can be zero if the operation
* does not need to send an address
* @addr.buswidth: number of IO lines used to transmit the address cycles
* @addr.val: address value. This value is always sent MSB first on the bus.
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
* assigned number of bytes.
* @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes
* @dummy.buswidth: number of IO lines used to transmit the dummy bytes
* @data.buswidth: number of IO lines used to send/receive the data
* @data.dir: direction of the transfer
* @data.buf.in: input buffer
* @data.buf.out: output buffer
*/
struct spi_mem_op {
struct {
u8 buswidth;
u8 opcode;
} cmd;
struct {
u8 nbytes;
u8 buswidth;
u64 val;
} addr;
struct {
u8 nbytes;
u8 buswidth;
} dummy;
struct {
u8 buswidth;
enum spi_mem_data_dir dir;
unsigned int nbytes;
/* buf.{in,out} must be DMA-able. */
union {
void *in;
const void *out;
} buf;
} data;
};
#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \
{ \
.cmd = __cmd, \
.addr = __addr, \
.dummy = __dummy, \
.data = __data, \
}
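/*
 * Editor's note - an illustrative sketch, not part of this patch: how a
 * memory driver could describe a plain single-line read with the macros
 * above. The 0x03 opcode, the 3-byte address and the example_mem_read()
 * name are assumptions made for the example; the SPI_MEM_OP* macros and
 * the spi_mem_adjust_op_size()/spi_mem_exec_op() helpers declared further
 * down in this header are the actual interface. A real driver would loop,
 * since adjust_op_size may shrink op.data.nbytes below len.
 */
static int example_mem_read(struct spi_mem *mem, loff_t from, size_t len,
			    void *buf)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					  SPI_MEM_OP_ADDR(3, from, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(len, buf, 1));
	int ret;

	/* Let the controller shrink the transfer to what it can handle. */
	ret = spi_mem_adjust_op_size(mem, &op);
	if (ret)
		return ret;

	/* Issue the operation; controllers without ->exec_op() support are
	 * handled by the core falling back to regular SPI transfers.
	 */
	return spi_mem_exec_op(mem, &op);
}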
/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
* @drvpriv: spi_mem_driver private data
*
* Extra information that describes the SPI memory device and may be needed by
* the controller to properly handle this device should be placed here.
*
* One example would be the device size, since some controllers expose their SPI
* mem devices through an io-mapped region.
*/
struct spi_mem {
struct spi_device *spi;
void *drvpriv;
};
/**
* spi_mem_set_drvdata() - attach driver private data to a SPI mem
* device
* @mem: memory device
* @data: data to attach to the memory device
*/
static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
{
mem->drvpriv = data;
}
/**
* spi_mem_get_drvdata() - get driver private data attached to a SPI mem
* device
* @mem: memory device
*
* Return: the data attached to the mem device.
*/
static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
{
return mem->drvpriv;
}
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
* limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
*
* This interface should be implemented by SPI controllers providing a
* high-level interface to execute SPI memory operations, which is usually the
* case for QSPI controllers.
*/
struct spi_controller_mem_ops {
int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
bool (*supports_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
};
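/*
 * Editor's note - an illustrative sketch, not part of this patch: the
 * controller-side hookup. The example_* names are invented; only the
 * spi_controller_mem_ops fields and the -ENOTSUPP fallback convention
 * (also used by the ti-qspi conversion in this series) come from the
 * new interface.
 */
static int example_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	/* Accelerate reads only; returning -ENOTSUPP makes the core fall
	 * back to regular SPI transfers for everything else.
	 */
	if (op->data.dir != SPI_MEM_DATA_IN || !op->data.nbytes)
		return -ENOTSUPP;

	/*
	 * ... program the controller from op->cmd, op->addr and op->dummy,
	 * then copy op->data.nbytes bytes into op->data.buf.in ...
	 */
	return 0;
}

static const struct spi_controller_mem_ops example_mem_ops = {
	.exec_op = example_exec_mem_op,
};

/* In probe, before registering the controller: ctlr->mem_ops = &example_mem_ops; */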
/**
* struct spi_mem_driver - SPI memory driver
* @spidrv: inherit from a SPI driver
* @probe: probe a SPI memory. Usually where detection/initialization takes
* place
* @remove: remove a SPI memory
* @shutdown: take appropriate action when the system is shut down
*
* This is just a thin wrapper around a spi_driver. The core takes care of
* allocating the spi_mem object and forwarding the probe/remove/shutdown
* requests to the spi_mem_driver. The reason we use this wrapper is that
* we might have to stuff more information into the spi_mem struct to let
* SPI controllers know more about the SPI memory they interact with, and
* having this intermediate layer allows us to do that without adding more
* useless fields to the spi_device object.
*/
struct spi_mem_driver {
struct spi_driver spidrv;
int (*probe)(struct spi_mem *mem);
int (*remove)(struct spi_mem *mem);
void (*shutdown)(struct spi_mem *mem);
};
#if IS_ENABLED(CONFIG_SPI_MEM)
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg)
{
return -ENOTSUPP;
}
static inline void
spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg)
{
}
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
int spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op);
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
void spi_mem_driver_unregister(struct spi_mem_driver *drv);
#define spi_mem_driver_register(__drv) \
spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
#define module_spi_mem_driver(__drv) \
module_driver(__drv, spi_mem_driver_register, \
spi_mem_driver_unregister)
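/*
 * Editor's note - an illustrative sketch, not part of this patch: a minimal
 * spi_mem_driver skeleton showing how the wrapper above is meant to be used.
 * The example_* names and the "example-flash" string are invented;
 * spi_mem_set_drvdata(), spi_mem_get_drvdata() and module_spi_mem_driver()
 * are the interfaces defined in this header. A real driver file would also
 * include <linux/module.h>.
 */
struct example_flash {
	struct spi_mem *mem;
};

static int example_probe(struct spi_mem *mem)
{
	struct example_flash *flash;

	flash = devm_kzalloc(&mem->spi->dev, sizeof(*flash), GFP_KERNEL);
	if (!flash)
		return -ENOMEM;

	flash->mem = mem;
	/* Attach driver state to the spi_mem device for later retrieval. */
	spi_mem_set_drvdata(mem, flash);
	return 0;
}

static int example_remove(struct spi_mem *mem)
{
	struct example_flash *flash = spi_mem_get_drvdata(mem);

	/* ... undo whatever example_probe() set up using flash ... */
	(void)flash;
	return 0;
}

static struct spi_mem_driver example_mem_driver = {
	.spidrv = {
		.driver = {
			.name = "example-flash",
		},
	},
	.probe = example_probe,
	.remove = example_remove,
};
module_spi_mem_driver(example_mem_driver);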
#endif /* __LINUX_SPI_MEM_H */
...@@ -26,7 +26,7 @@ struct dma_chan; ...@@ -26,7 +26,7 @@ struct dma_chan;
struct property_entry; struct property_entry;
struct spi_controller; struct spi_controller;
struct spi_transfer; struct spi_transfer;
struct spi_flash_read_message; struct spi_controller_mem_ops;
/* /*
* INTERFACES between SPI master-side drivers and SPI slave protocol handlers, * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
...@@ -376,13 +376,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) ...@@ -376,13 +376,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* transfer_one callback. * transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs * @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message(). * in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message(). * @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller * @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @spi_flash_read: to support spi-controller hardwares that provide
* accelerated interface to read from flash devices.
* @spi_flash_can_dma: analogous to can_dma() interface, but for
* controllers implementing spi_flash_read.
* @flash_read_supported: spi device supports flash read
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that * number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself). * are not GPIOs (driven by the SPI controller itself).
...@@ -548,11 +546,6 @@ struct spi_controller { ...@@ -548,11 +546,6 @@ struct spi_controller {
int (*unprepare_message)(struct spi_controller *ctlr, int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message); struct spi_message *message);
int (*slave_abort)(struct spi_controller *ctlr); int (*slave_abort)(struct spi_controller *ctlr);
int (*spi_flash_read)(struct spi_device *spi,
struct spi_flash_read_message *msg);
bool (*spi_flash_can_dma)(struct spi_device *spi,
struct spi_flash_read_message *msg);
bool (*flash_read_supported)(struct spi_device *spi);
/* /*
* These hooks are for drivers that use a generic implementation * These hooks are for drivers that use a generic implementation
...@@ -564,6 +557,9 @@ struct spi_controller { ...@@ -564,6 +557,9 @@ struct spi_controller {
void (*handle_err)(struct spi_controller *ctlr, void (*handle_err)(struct spi_controller *ctlr,
struct spi_message *message); struct spi_message *message);
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
/* gpio chip select */ /* gpio chip select */
int *cs_gpios; int *cs_gpios;
...@@ -1183,48 +1179,6 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) ...@@ -1183,48 +1179,6 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
return be16_to_cpu(result); return be16_to_cpu(result);
} }
/**
* struct spi_flash_read_message - flash specific information for
* spi-masters that provide accelerated flash read interfaces
* @buf: buffer to read data
* @from: offset within the flash from where data is to be read
* @len: length of data to be read
* @retlen: actual length of data read
* @read_opcode: read_opcode to be used to communicate with flash
* @addr_width: number of address bytes
* @dummy_bytes: number of dummy bytes
* @opcode_nbits: number of lines to send opcode
* @addr_nbits: number of lines to send address
* @data_nbits: number of lines for data
* @rx_sg: Scatterlist for receive data read from flash
* @cur_msg_mapped: message has been mapped for DMA
*/
struct spi_flash_read_message {
void *buf;
loff_t from;
size_t len;
size_t retlen;
u8 read_opcode;
u8 addr_width;
u8 dummy_bytes;
u8 opcode_nbits;
u8 addr_nbits;
u8 data_nbits;
struct sg_table rx_sg;
bool cur_msg_mapped;
};
/* SPI core interface for flash read support */
static inline bool spi_flash_read_supported(struct spi_device *spi)
{
return spi->controller->spi_flash_read &&
(!spi->controller->flash_read_supported ||
spi->controller->flash_read_supported(spi));
}
int spi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg);
/*---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/
/* /*
......