Commit 2e5f0810 authored by Mark Brown's avatar Mark Brown

Merge branch 'spi-5.2' into spi-next

parents 58b860ed d61ad23c
......@@ -18,6 +18,10 @@ Optional properties:
- gpios : specifies the gpio pins to be used for chipselects.
The gpios will be referred to as reg = <index> in the SPI child nodes.
If unspecified, a single SPI device without a chip select can be used.
- fsl,spisel_boot : for the MPC8306 and MPC8309, specifies that the
SPISEL_BOOT signal is used as chip select for a slave device. Use
reg = <number of gpios> in the corresponding child node, i.e. 0 if
the gpios property is not present.
Example:
spi@4c0 {
......
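The controller-driver side of the fsl,spisel_boot property appears later in this merge (spi-fsl-spi.c); a minimal sketch of the detection, assuming np is the controller's device node, looks like:

    /* Sketch only: detect the SPISEL_BOOT chip select described above
     * (mirrors the spi-fsl-spi.c change further down in this merge). */
    bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
                       of_property_read_bool(np, "fsl,spisel_boot");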
......@@ -23,6 +23,18 @@ Required properties:
Recommended properties:
- spi-max-frequency: Definition as per
Documentation/devicetree/bindings/spi/spi-bus.txt
Optional properties:
- nvidia,tx-clk-tap-delay: Delays the clock going out to the external device
with this tap value. This property is used to tune the outgoing data from
Tegra SPI master with respect to outgoing Tegra SPI master clock.
Tap values vary based on the platform design trace lengths from Tegra SPI
to corresponding slave devices. Valid tap values are from 0 through 63.
- nvidia,rx-clk-tap-delay: Delays the clock coming in from the external device
with this tap value. This property is used to adjust the Tegra SPI master
clock with respect to the data from the SPI slave device.
Tap values vary based on the platform design trace lengths from Tegra SPI
to corresponding slave devices. Valid tap values are from 0 through 63.
Example:
spi@7000d600 {
......@@ -38,4 +50,12 @@ spi@7000d600 {
reset-names = "spi";
dmas = <&apbdma 16>, <&apbdma 16>;
dma-names = "rx", "tx";
<spi-client>@<bus_num> {
...
...
nvidia,rx-clk-tap-delay = <0>;
nvidia,tx-clk-tap-delay = <16>;
...
};
};
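A hedged sketch (not taken verbatim from the Tegra driver) of how a controller driver could pick up these per-device values, assuming slave_np points at the client node shown above:

    /* Read the tap delays described above; absent properties keep 0. */
    u32 tx_tap = 0, rx_tap = 0;

    of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay", &tx_tap);
    of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay", &rx_tap);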
......@@ -4,6 +4,7 @@ Required properties:
- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
"renesas,msiof-r8a7744" (RZ/G1N)
"renesas,msiof-r8a7745" (RZ/G1E)
"renesas,msiof-r8a77470" (RZ/G1C)
"renesas,msiof-r8a774a1" (RZ/G2M)
"renesas,msiof-r8a774c0" (RZ/G2E)
"renesas,msiof-r8a7790" (R-Car H2)
......
......@@ -8,9 +8,16 @@ Required properties:
- interrupts : One interrupt, used by the controller.
- #address-cells : <1>, as required by generic SPI binding.
- #size-cells : <0>, also as required by generic SPI binding.
- clocks : phandles for the clocks, see the description of clock-names below.
The phandle for the "ssi_clk" is required. The phandle for the "pclk" clock
is optional. If a single clock is specified but no clock-name, it is the
"ssi_clk" clock. If both clocks are listed, the "ssi_clk" must be first.
Optional properties:
- cs-gpios : Specifies the gpio pis to be used for chipselects.
- clock-names : Contains the names of the clocks:
"ssi_clk", for the core clock used to generate the external SPI clock.
"pclk", the interface clock, required for register access.
- cs-gpios : Specifies the gpio pins to be used for chipselects.
- num-cs : The number of chipselects. If omitted, this will default to 4.
- reg-io-width : The I/O register width (in bytes) implemented by this
device. Supported values are 2 or 4 (the default).
......@@ -25,6 +32,7 @@ Example:
interrupts = <0 154 4>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&spi_m_clk>;
num-cs = <2>;
cs-gpios = <&gpio0 13 0>,
<&gpio0 14 0>;
......
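A short probe-side sketch (assumed code, not the DesignWare driver verbatim) of how the two clocks described above are usually consumed, with "pclk" treated as optional:

    /* "ssi_clk" is the first/unnamed core clock, "pclk" is optional. */
    struct clk *ssi_clk, *pclk;

    ssi_clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(ssi_clk))
            return PTR_ERR(ssi_clk);

    pclk = devm_clk_get_optional(&pdev->dev, "pclk");
    if (IS_ERR(pclk))
            return PTR_ERR(pclk);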
......@@ -7,7 +7,11 @@ Required properties:
- reg : address and length of the lpspi master registers
- interrupt-parent : core interrupt controller
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier
- clocks : lpspi clock specifier. Its number and order need to correspond to the
value in clock-names.
- clock-names : Names of the clocks in "clocks", in order: the per clock and
	  the ipg clock. The i.MX7ULP has only a per clock, so use CLK_DUMMY
	  to fill the "ipg" slot.
- spi-slave : spi slave mode support. In slave mode, add this attribute without
value. In master mode, remove it.
......@@ -18,6 +22,8 @@ lpspi2: lpspi@40290000 {
reg = <0x40290000 0x10000>;
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7ULP_CLK_LPSPI2>;
clocks = <&clks IMX7ULP_CLK_LPSPI2>,
<&clks IMX7ULP_CLK_DUMMY>;
clock-names = "per", "ipg";
spi-slave;
};
......@@ -10,6 +10,7 @@ Required properties:
- mediatek,mt8135-spi: for mt8135 platforms
- mediatek,mt8173-spi: for mt8173 platforms
- mediatek,mt8183-spi: for mt8183 platforms
- "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
- #address-cells: should be 1.
......
Binding for MTK SPI controller (MT7621 MIPS)
Required properties:
- compatible: Should be one of the following:
- "ralink,mt7621-spi": for mt7621/mt7628/mt7688 platforms
- #address-cells: should be 1.
- #size-cells: should be 0.
- reg: Address and length of the register set for the device
- resets: phandle to the reset controller asserting this device in
reset
See ../reset/reset.txt for details.
Optional properties:
- cs-gpios: see spi-bus.txt.
Example:
- SoC Specific Portion:
spi0: spi@b00 {
compatible = "ralink,mt7621-spi";
reg = <0xb00 0x100>;
#address-cells = <1>;
#size-cells = <0>;
resets = <&rstctrl 18>;
reset-names = "spi";
};
Xilinx Zynq QSPI controller Device Tree Bindings
-------------------------------------------------------------------
Required properties:
- compatible : Should be "xlnx,zynq-qspi-1.0".
- reg : Physical base address and size of QSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
Optional properties:
- num-cs : Number of chip selects used.
Example:
qspi: spi@e000d000 {
compatible = "xlnx,zynq-qspi-1.0";
reg = <0xe000d000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <0 19 4>;
clock-names = "ref_clk", "pclk";
clocks = <&clkc 10>, <&clkc 43>;
num-cs = <1>;
};
......@@ -572,6 +572,12 @@ SPI MASTER METHODS
0: transfer is finished
1: transfer is still in progress
master->set_cs_timing(struct spi_device *spi, u8 setup_clk_cycles,
u8 hold_clk_cycles, u8 inactive_clk_cycles)
This method allows SPI client drivers to request that the SPI master
controller configure device-specific CS setup, hold and inactive timing
requirements.
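A minimal client-side sketch (hypothetical code, not part of this patch) of requesting CS timing through this hook, guarding against controllers that do not implement it:

    #include <linux/spi/spi.h>

    /* Illustrative values: 4 setup, 2 hold, 8 inactive clock cycles. */
    static void example_request_cs_timing(struct spi_device *spi)
    {
            struct spi_controller *ctlr = spi->controller;

            if (ctlr->set_cs_timing)
                    ctlr->set_cs_timing(spi, 4, 2, 8);
    }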
DEPRECATED METHODS
master->transfer(struct spi_device *spi, struct spi_message *message)
......
......@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/gpio/machine.h>
#include <sound/cs4271.h>
......@@ -105,13 +106,16 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
},
};
static int edb93xx_spi_chipselects[] __initdata = {
EP93XX_GPIO_LINE_EGPIO6,
static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
.dev_id = "ep93xx-spi.0",
.table = {
GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
{ },
},
};
static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
.chipselect = edb93xx_spi_chipselects,
.num_chipselect = ARRAY_SIZE(edb93xx_spi_chipselects),
/* Intentionally left blank */
};
static void __init edb93xx_register_spi(void)
......@@ -123,6 +127,7 @@ static void __init edb93xx_register_spi(void)
else if (machine_is_edb9315a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;
gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table);
ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
ARRAY_SIZE(edb93xx_spi_board_info));
}
......
......@@ -77,13 +77,15 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
* low between multi-message command blocks. From v1.4, it uses a GPIO instead.
* v1.3 parts will still work, since the signal on SFRMOUT is automatic.
*/
static int simone_spi_chipselects[] __initdata = {
EP93XX_GPIO_LINE_EGPIO1,
static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
.dev_id = "ep93xx-spi.0",
.table = {
GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
{ },
},
};
static struct ep93xx_spi_info simone_spi_info __initdata = {
.chipselect = simone_spi_chipselects,
.num_chipselect = ARRAY_SIZE(simone_spi_chipselects),
.use_dma = 1,
};
......@@ -113,6 +115,7 @@ static void __init simone_init_machine(void)
ep93xx_register_i2c(simone_i2c_board_info,
ARRAY_SIZE(simone_i2c_board_info));
gpiod_add_lookup_table(&simone_mmc_spi_gpio_table);
gpiod_add_lookup_table(&simone_spi_cs_gpio_table);
ep93xx_register_spi(&simone_spi_info, simone_spi_devices,
ARRAY_SIZE(simone_spi_devices));
simone_register_audio();
......
......@@ -22,6 +22,7 @@
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/spi-ep93xx.h>
#include <linux/gpio/machine.h>
#include <mach/gpio-ep93xx.h>
#include <mach/hardware.h>
......@@ -269,13 +270,15 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
* All the work is performed automatically by !SPI_FRAME (SFRM1) and
* goes through the CPLD
*/
static int bk3_spi_chipselects[] __initdata = {
EP93XX_GPIO_LINE_F(3),
static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
.dev_id = "ep93xx-spi.0",
.table = {
GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
{ },
},
};
static struct ep93xx_spi_info bk3_spi_master __initdata = {
.chipselect = bk3_spi_chipselects,
.num_chipselect = ARRAY_SIZE(bk3_spi_chipselects),
.use_dma = 1,
};
......@@ -316,13 +319,17 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
},
};
static int ts72xx_spi_chipselects[] __initdata = {
EP93XX_GPIO_LINE_F(2), /* DIO_17 */
static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
.dev_id = "ep93xx-spi.0",
.table = {
/* DIO_17 */
GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
{ },
},
};
static struct ep93xx_spi_info ts72xx_spi_info __initdata = {
.chipselect = ts72xx_spi_chipselects,
.num_chipselect = ARRAY_SIZE(ts72xx_spi_chipselects),
/* Intentionally left blank */
};
static void __init ts72xx_init_machine(void)
......@@ -339,6 +346,7 @@ static void __init ts72xx_init_machine(void)
if (board_is_ts7300())
platform_device_register(&ts73xx_fpga_device);
#endif
gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table);
ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
ARRAY_SIZE(ts72xx_spi_devices));
}
......@@ -398,6 +406,7 @@ static void __init bk3_init_machine(void)
ep93xx_register_eth(&ts72xx_eth_data, 1);
gpiod_add_lookup_table(&bk3_spi_cs_gpio_table);
ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info,
ARRAY_SIZE(bk3_spi_board_info));
......
......@@ -245,15 +245,17 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
},
};
static int vision_spi_chipselects[] __initdata = {
EP93XX_GPIO_LINE_EGPIO6,
EP93XX_GPIO_LINE_EGPIO7,
EP93XX_GPIO_LINE_G(2),
static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
.dev_id = "ep93xx-spi.0",
.table = {
GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW),
{ },
},
};
static struct ep93xx_spi_info vision_spi_master __initdata = {
.chipselect = vision_spi_chipselects,
.num_chipselect = ARRAY_SIZE(vision_spi_chipselects),
.use_dma = 1,
};
......@@ -295,6 +297,7 @@ static void __init vision_init_machine(void)
ep93xx_register_i2c(vision_i2c_info,
ARRAY_SIZE(vision_i2c_info));
gpiod_add_lookup_table(&vision_spi_mmc_gpio_table);
gpiod_add_lookup_table(&vision_spi_cs_gpio_table);
ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
ARRAY_SIZE(vision_spi_board_info));
vision_register_i2s();
......
......@@ -806,7 +806,6 @@ static struct spi_board_info spi_bus[] = {
.platform_data = &mmc_spi_info,
.max_speed_hz = 5000000,
.mode = SPI_MODE_0,
.controller_data = (void *) GPIO_PTM4,
},
};
......@@ -838,6 +837,14 @@ static struct platform_device msiof0_device = {
.resource = msiof0_resources,
};
static struct gpiod_lookup_table msiof_gpio_table = {
.dev_id = "spi_sh_msiof.0",
.table = {
GPIO_LOOKUP("sh7724_pfc", GPIO_PTM4, "cs", GPIO_ACTIVE_HIGH),
{ },
},
};
#endif
/* FSI */
......@@ -1296,12 +1303,11 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
gpio_request(GPIO_FN_MSIOF0_RXD, NULL);
gpio_request(GPIO_FN_MSIOF0_TSCK, NULL);
gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */
gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
gpiod_add_lookup_table(&mmc_spi_gpio_table);
gpiod_add_lookup_table(&msiof_gpio_table);
spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
#endif
......
......@@ -426,6 +426,12 @@ config SPI_MT65XX
say Y or M here. If you are not sure, say N.
SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
config SPI_MT7621
tristate "MediaTek MT7621 SPI Controller"
depends on RALINK || COMPILE_TEST
help
This selects a driver for the MediaTek MT7621 SPI Controller.
config SPI_NPCM_PSPI
tristate "Nuvoton NPCM PSPI Controller"
depends on ARCH_NPCM || COMPILE_TEST
......@@ -842,9 +848,17 @@ config SPI_XTENSA_XTFPGA
16 bit words in SPI mode 0, automatically asserting CS on transfer
start and deasserting on end.
config SPI_ZYNQ_QSPI
tristate "Xilinx Zynq QSPI controller"
depends on ARCH_ZYNQ || COMPILE_TEST
help
This enables support for the Zynq Quad SPI controller
in master mode.
This controller only supports the SPI memory interface.
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
depends on SPI_MASTER && HAS_DMA
depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
......
......@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
......@@ -118,6 +119,7 @@ obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
# SPI slave protocol handlers
......
......@@ -366,7 +366,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return err;
}
const char *atmel_qspi_get_name(struct spi_mem *spimem)
static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
return dev_name(spimem->spi->dev.parent);
}
......
......@@ -178,12 +178,6 @@ static int at91_usart_spi_setup(struct spi_device *spi)
struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
u8 bits = spi->bits_per_word;
if (bits != 8) {
dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
return -EINVAL;
}
if (spi->mode & SPI_CPOL)
mr |= US_MR_CPOL;
......@@ -212,7 +206,7 @@ static int at91_usart_spi_setup(struct spi_device *spi)
dev_dbg(&spi->dev,
"setup: bpw %u mode 0x%x -> mr %d %08x\n",
bits, spi->mode, spi->chip_select, mr);
spi->bits_per_word, spi->mode, spi->chip_select, mr);
return 0;
}
......
......@@ -335,20 +335,6 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
return 1;
}
/*
* DMA support
*
* this implementation has currently a few issues in so far as it does
* not work arrount limitations of the HW.
*
* the main one being that DMA transfers are limited to 16 bit
* (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
*
* there may be a few more border-cases we may need to address as well
* but unfortunately this would mean splitting up the scatter-gather
* list making it slightly unpractical...
*/
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @master: SPI master
......@@ -630,19 +616,6 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
/* BCM2835_SPI_DLEN has defined a max transfer size as
* 16 bit, so max is 65535
* we can revisit this by using an alternative transfer
* method - ideally this would get done without any more
* interaction...
*/
if (tfr->len > 65535) {
dev_warn_once(&spi->dev,
"transfer size of %d too big for dma-transfer\n",
tfr->len);
return false;
}
/* return OK */
return true;
}
......@@ -707,7 +680,6 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
/* all went well, so set can_dma */
master->can_dma = bcm2835_spi_can_dma;
master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
/* need to do TX AND RX DMA, so we need dummy buffers */
master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
......@@ -844,6 +816,17 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
int ret;
/*
* DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
* due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
* exceeded.
*/
ret = spi_split_transfers_maxsize(master, msg, 65532,
GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
......
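For reference, 65532 is the DLEN hardware limit of 65535 rounded down to a multiple of four, so the split transfers stay 32-bit FIFO aligned; for example, a 128 KiB (131072-byte) transfer is split into two 65532-byte chunks plus an 8-byte remainder.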
......@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
......@@ -36,6 +37,12 @@
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
"time in us to run a transfer in polling mode - if zero no polling is used\n");
/*
* spi register defines
*
......@@ -88,10 +95,6 @@
#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
/* timeout values */
#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
struct bcm2835aux_spi {
void __iomem *regs;
struct clk *clk;
......@@ -102,8 +105,53 @@ struct bcm2835aux_spi {
int tx_len;
int rx_len;
int pending;
u64 count_transfer_polling;
u64 count_transfer_irq;
u64 count_transfer_irq_after_poll;
struct dentry *debugfs_dir;
};
#if defined(CONFIG_DEBUG_FS)
static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
const char *dname)
{
char name[64];
struct dentry *dir;
/* get full name */
snprintf(name, sizeof(name), "spi-bcm2835aux-%s", dname);
/* the base directory */
dir = debugfs_create_dir(name, NULL);
bs->debugfs_dir = dir;
/* the counters */
debugfs_create_u64("count_transfer_polling", 0444, dir,
&bs->count_transfer_polling);
debugfs_create_u64("count_transfer_irq", 0444, dir,
&bs->count_transfer_irq);
debugfs_create_u64("count_transfer_irq_after_poll", 0444, dir,
&bs->count_transfer_irq_after_poll);
}
static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
{
debugfs_remove_recursive(bs->debugfs_dir);
bs->debugfs_dir = NULL;
}
#else
static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
const char *dname)
{
}
static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
{
}
#endif /* CONFIG_DEBUG_FS */
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
......@@ -123,9 +171,6 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
if (bs->rx_buf) {
switch (count) {
case 4:
*bs->rx_buf++ = (data >> 24) & 0xff;
/* fallthrough */
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
/* fallthrough */
......@@ -178,24 +223,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
{
struct spi_master *master = dev_id;
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
irqreturn_t ret = IRQ_NONE;
/* IRQ may be shared, so return if our interrupts are disabled */
if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
(BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
return ret;
u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* check if we have data to read */
while (bs->rx_len &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
bcm2835aux_rd_fifo(bs);
ret = IRQ_HANDLED;
}
/* check if we have data to write */
while (bs->tx_len &&
......@@ -203,16 +238,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
ret = IRQ_HANDLED;
}
}
/* and check if we have reached "done" */
while (bs->rx_len &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_BUSY))) {
bcm2835aux_rd_fifo(bs);
ret = IRQ_HANDLED;
}
static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
/* IRQ may be shared, so return if our interrupts are disabled */
if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
(BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
return IRQ_NONE;
/* do common fifo handling */
bcm2835aux_spi_transfer_helper(bs);
if (!bs->tx_len) {
/* disable tx fifo empty interrupt */
......@@ -226,8 +266,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
complete(&master->xfer_completion);
}
/* and return */
return ret;
return IRQ_HANDLED;
}
static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
......@@ -251,6 +290,9 @@ static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
/* update statistics */
bs->count_transfer_irq++;
/* fill in registers and fifos before enabling interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
......@@ -273,35 +315,22 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
u32 stat;
/* update statistics */
bs->count_transfer_polling++;
/* configure spi */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
/* set the timeout */
timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;
/* set the timeout to at least 2 jiffies */
timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
/* read status */
stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* fill in tx fifo with remaining data */
if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
continue;
}
/* read data from fifo for both cases */
if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
bcm2835aux_rd_fifo(bs);
continue;
}
if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
bcm2835aux_rd_fifo(bs);
continue;
}
/* do common fifo handling */
bcm2835aux_spi_transfer_helper(bs);
/* there is still data pending to read check the timeout */
if (bs->rx_len && time_after(jiffies, timeout)) {
......@@ -310,6 +339,7 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* forward to interrupt handler */
bs->count_transfer_irq_after_poll++;
return __bcm2835aux_spi_transfer_one_irq(master,
spi, tfr);
}
......@@ -324,8 +354,8 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
unsigned long spi_hz, clk_hz, speed;
unsigned long spi_used_hz;
unsigned long spi_hz, clk_hz, speed, spi_used_hz;
unsigned long hz_per_byte, byte_limit;
/* calculate the registers to handle
*
......@@ -369,14 +399,15 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
* 30 µs per 300,000 Hz of bus clock.
*/
#define HZ_PER_BYTE ((9 * 1000000) / BCM2835_AUX_SPI_POLLING_LIMIT_US)
hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
/* run in polling mode for short transfers */
if (tfr->len < spi_used_hz / HZ_PER_BYTE)
if (tfr->len < byte_limit)
return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
/* run in interrupt mode for all others */
return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
#undef HZ_PER_BYTE
}
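As a worked example of the heuristic above (illustrative numbers, not from the patch): with the default polling_limit_us of 30 and an effective bus clock of 5 MHz, hz_per_byte = 9 * 1000000 / 30 = 300000 and byte_limit = 5000000 / 300000 = 16, so transfers shorter than 16 bytes are polled and longer ones fall back to interrupt mode.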
static int bcm2835aux_spi_prepare_message(struct spi_master *master,
......@@ -421,6 +452,50 @@ static void bcm2835aux_spi_handle_err(struct spi_master *master,
bcm2835aux_spi_reset_hw(bs);
}
static int bcm2835aux_spi_setup(struct spi_device *spi)
{
int ret;
/* sanity check for native cs */
if (spi->mode & SPI_NO_CS)
return 0;
if (gpio_is_valid(spi->cs_gpio)) {
/* with gpio-cs, set the GPIO to the correct level
* and as an output (in case the DT has the GPIO configured
* as native CS instead of as an output)
*/
ret = gpio_direction_output(spi->cs_gpio,
(spi->mode & SPI_CS_HIGH) ? 0 : 1);
if (ret)
dev_err(&spi->dev,
"could not set gpio %i as output: %i\n",
spi->cs_gpio, ret);
return ret;
}
/* for dt-backwards compatibility: only support native on CS0
* known things not supported with broken native CS:
* * multiple chip-selects: cs0-cs2 are all
simultaneously asserted whenever there is a transfer
* this even includes SPI_NO_CS
* * SPI_CS_HIGH: cs are always asserted low
* * cs_change: cs is deasserted after each spi_transfer
* * cs_delay_usec: cs is always deasserted one SCK cycle
* after the last transfer
* probably more...
*/
dev_warn(&spi->dev,
"Native CS is not supported - please configure cs-gpio in device-tree\n");
if (spi->chip_select == 0)
return 0;
dev_warn(&spi->dev, "Native CS is not working for cs > 0\n");
return -EINVAL;
}
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
......@@ -438,7 +513,19 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->num_chipselect = -1;
/* even though the driver never officially supported native CS
* allow a single native CS for legacy DT support purposes when
* no cs-gpio is configured.
* Known limitations for native cs are:
* multiple chip-selects: cs0-cs2 are all simultaneously asserted
* whenever there is a transfer - this even includes SPI_NO_CS
* SPI_CS_HIGH: is ignored - cs are always asserted low
* * cs_change: cs is deasserted after each spi_transfer
* * cs_delay_usec: cs is always deasserted one SCK cycle after
* a spi_transfer
*/
master->num_chipselect = 1;
master->setup = bcm2835aux_spi_setup;
master->transfer_one = bcm2835aux_spi_transfer_one;
master->handle_err = bcm2835aux_spi_handle_err;
master->prepare_message = bcm2835aux_spi_prepare_message;
......@@ -502,6 +589,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
bcm2835aux_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
out_clk_disable:
......@@ -516,6 +605,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
bcm2835aux_debugfs_remove(bs);
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
......
......@@ -335,6 +335,42 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
/*----------------------------------------------------------------------*/
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
if (!master || !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
if (master->transfer || master->transfer_one_message)
return -EINVAL;
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_init);
/**
* spi_bitbang_start - start up a polled/bitbanging SPI master driver
* @bitbang: driver handle
......@@ -368,33 +404,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
struct spi_master *master = bitbang->master;
int ret;
if (!master || !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
if (master->transfer || master->transfer_one_message)
return -EINVAL;
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
}
}
ret = spi_bitbang_init(bitbang);
if (ret)
return ret;
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
......
......@@ -30,6 +30,7 @@
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
struct clk *pclk;
void *priv;
};
......@@ -172,6 +173,14 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
return ret;
/* Optional clock needed to access the registers */
dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
if (IS_ERR(dwsmmio->pclk))
return PTR_ERR(dwsmmio->pclk);
ret = clk_prepare_enable(dwsmmio->pclk);
if (ret)
goto out_clk;
dws->bus_num = pdev->id;
dws->max_freq = clk_get_rate(dwsmmio->clk);
......@@ -199,6 +208,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
return 0;
out:
clk_disable_unprepare(dwsmmio->pclk);
out_clk:
clk_disable_unprepare(dwsmmio->clk);
return ret;
}
......@@ -208,6 +219,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
return 0;
......
......@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/dma-ep93xx.h>
......@@ -676,6 +675,7 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
master->prepare_message = ep93xx_spi_prepare_message;
......@@ -683,31 +683,11 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->num_chipselect = info->num_chipselect;
master->cs_gpios = devm_kcalloc(&master->dev,
master->num_chipselect, sizeof(int),
GFP_KERNEL);
if (!master->cs_gpios) {
error = -ENOMEM;
goto fail_release_master;
}
for (i = 0; i < master->num_chipselect; i++) {
master->cs_gpios[i] = info->chipselect[i];
if (!gpio_is_valid(master->cs_gpios[i]))
continue;
error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
GPIOF_OUT_INIT_HIGH,
"ep93xx-spi");
if (error) {
dev_err(&pdev->dev, "could not request cs gpio %d\n",
master->cs_gpios[i]);
goto fail_release_master;
}
}
/*
* The SPI core will count the number of GPIO descriptors to figure
* out the number of chip selects available on the platform.
*/
master->num_chipselect = 0;
platform_set_drvdata(pdev, master);
......
......@@ -95,8 +95,10 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
int ngpios;
int *gpios;
bool *alow_flags;
__be32 __iomem *immr_spi_cs;
};
extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
......
......@@ -8,7 +8,10 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
......@@ -16,7 +19,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
......@@ -24,6 +32,11 @@
#define DRIVER_NAME "fsl_lpspi"
#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
/* The maximum number of bytes that eDMA can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)
/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID 0x0
#define IMX7ULP_PARAM 0x4
......@@ -57,6 +70,8 @@
#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define DER_RDDE BIT(1)
#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
......@@ -84,8 +99,11 @@ struct lpspi_config {
struct fsl_lpspi_data {
struct device *dev;
void __iomem *base;
struct clk *clk;
unsigned long base_phys;
struct clk *clk_ipg;
struct clk *clk_per;
bool is_slave;
bool is_first_byte;
void *rx_buf;
const void *tx_buf;
......@@ -101,6 +119,13 @@ struct fsl_lpspi_data {
struct completion xfer_done;
bool slave_aborted;
/* DMA */
bool usedma;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
int chipselect[0];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
......@@ -147,12 +172,48 @@ static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
static int fsl_lpspi_bytes_per_word(const int bpw)
{
return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}
static bool fsl_lpspi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
unsigned int bytes_per_word;
if (!controller->dma_rx)
return false;
bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
switch (bytes_per_word)
{
case 1:
case 2:
case 4:
break;
default:
return false;
}
return true;
}
static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
int ret;
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
return ret;
}
return clk_prepare_enable(fsl_lpspi->clk);
return 0;
}
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
......@@ -160,7 +221,22 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk);
pm_runtime_mark_last_busy(fsl_lpspi->dev);
pm_runtime_put_autosuspend(fsl_lpspi->dev);
return 0;
}
static int fsl_lpspi_prepare_message(struct spi_controller *controller,
struct spi_message *msg)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
struct spi_device *spi = msg->spi;
int gpio = fsl_lpspi->chipselect[spi->chip_select];
if (gpio_is_valid(gpio))
gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
return 0;
}
......@@ -197,8 +273,7 @@ static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi->rx(fsl_lpspi);
}
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
bool is_first_xfer)
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp = 0;
......@@ -213,11 +288,13 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
* For the first transfer, clear TCR_CONTC to assert SS.
* For subsequent transfer, set TCR_CONTC to keep SS asserted.
*/
temp |= TCR_CONT;
if (is_first_xfer)
temp &= ~TCR_CONTC;
else
temp |= TCR_CONTC;
if (!fsl_lpspi->usedma) {
temp |= TCR_CONT;
if (fsl_lpspi->is_first_byte)
temp &= ~TCR_CONTC;
else
temp |= TCR_CONTC;
}
}
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
......@@ -228,7 +305,11 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16;
if (!fsl_lpspi->usedma)
temp = fsl_lpspi->watermark >> 1 |
(fsl_lpspi->watermark >> 1) << 16;
else
temp = fsl_lpspi->watermark >> 1;
writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
......@@ -241,7 +322,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
unsigned int perclk_rate, scldiv;
u8 prescale;
perclk_rate = clk_get_rate(fsl_lpspi->clk);
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
if (config.speed_hz > perclk_rate / 2) {
dev_err(fsl_lpspi->dev,
"per-clk should be at least two times of transfer speed");
return -EINVAL;
}
for (prescale = 0; prescale < 8; prescale++) {
scldiv = perclk_rate /
(clkdivs[prescale] * config.speed_hz) - 2;
......@@ -257,12 +345,59 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
fsl_lpspi->base + IMX7ULP_CCR);
dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
perclk_rate, config.speed_hz, prescale, scldiv);
return 0;
}
static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 2:
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 1:
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
return -EINVAL;
}
tx.direction = DMA_MEM_TO_DEV;
tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = 1;
ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
ret);
return ret;
}
rx.direction = DMA_DEV_TO_MEM;
rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
rx.src_addr_width = buswidth;
rx.src_maxburst = 1;
ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
ret);
return ret;
}
return 0;
}
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
......@@ -288,18 +423,27 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
temp |= CR_RRF | CR_RTF | CR_MEN;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
temp = 0;
if (fsl_lpspi->usedma)
temp = DER_TDDE | DER_RDDE;
writel(temp, fsl_lpspi->base + IMX7ULP_DER);
return 0;
}
static void fsl_lpspi_setup_transfer(struct spi_device *spi,
static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(spi->controller);
if (t == NULL)
return -EINVAL;
fsl_lpspi->config.mode = spi->mode;
fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
fsl_lpspi->config.bpw = t->bits_per_word;
fsl_lpspi->config.speed_hz = t->speed_hz;
fsl_lpspi->config.chip_select = spi->chip_select;
if (!fsl_lpspi->config.speed_hz)
......@@ -324,7 +468,12 @@ static void fsl_lpspi_setup_transfer(struct spi_device *spi,
else
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
fsl_lpspi_config(fsl_lpspi);
if (fsl_lpspi_can_dma(controller, spi, t))
fsl_lpspi->usedma = 1;
else
fsl_lpspi->usedma = 0;
return fsl_lpspi_config(fsl_lpspi);
}
static int fsl_lpspi_slave_abort(struct spi_controller *controller)
......@@ -333,7 +482,13 @@ static int fsl_lpspi_slave_abort(struct spi_controller *controller)
spi_controller_get_devdata(controller);
fsl_lpspi->slave_aborted = true;
complete(&fsl_lpspi->xfer_done);
if (!fsl_lpspi->usedma)
complete(&fsl_lpspi->xfer_done);
else {
complete(&fsl_lpspi->dma_tx_completion);
complete(&fsl_lpspi->dma_rx_completion);
}
return 0;
}
......@@ -362,8 +517,10 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
/* Disable all interrupt */
fsl_lpspi_intctrl(fsl_lpspi, 0);
if (!fsl_lpspi->usedma) {
/* Disable all interrupt */
fsl_lpspi_intctrl(fsl_lpspi, 0);
}
/* W1C for all flags in SR */
temp = 0x3F << 8;
......@@ -376,8 +533,177 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
return 0;
}
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
static void fsl_lpspi_dma_rx_callback(void *cookie)
{
struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
complete(&fsl_lpspi->dma_rx_completion);
}
static void fsl_lpspi_dma_tx_callback(void *cookie)
{
struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
complete(&fsl_lpspi->dma_tx_completion);
}
static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
int size)
{
unsigned long timeout = 0;
/* Time with actual data transfer and CS change delay related to HW */
timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
/* Add extra second for scheduler related activities */
timeout += 1;
/* Double calculated timeout */
return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
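As a worked example of the calculation above: a 4096-byte transfer at 1 MHz gives (8 + 4) * 4096 / 1000000 = 0 seconds of raw transfer time in integer math, the one-second scheduling margin then dominates, and doubling yields msecs_to_jiffies(2000), i.e. a two-second wait.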
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
struct fsl_lpspi_data *fsl_lpspi,
struct spi_transfer *transfer)
{
struct dma_async_tx_descriptor *desc_tx, *desc_rx;
unsigned long transfer_timeout;
unsigned long timeout;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
int ret;
ret = fsl_lpspi_dma_configure(controller);
if (ret)
return ret;
desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
return -EINVAL;
desc_rx->callback = fsl_lpspi_dma_rx_callback;
desc_rx->callback_param = (void *)fsl_lpspi;
dmaengine_submit(desc_rx);
reinit_completion(&fsl_lpspi->dma_rx_completion);
dma_async_issue_pending(controller->dma_rx);
desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dmaengine_terminate_all(controller->dma_tx);
return -EINVAL;
}
desc_tx->callback = fsl_lpspi_dma_tx_callback;
desc_tx->callback_param = (void *)fsl_lpspi;
dmaengine_submit(desc_tx);
reinit_completion(&fsl_lpspi->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
fsl_lpspi->slave_aborted = false;
if (!fsl_lpspi->is_slave) {
transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
transfer->len);
/* Wait for eDMA to finish the data transfer. */
timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
transfer_timeout);
if (!timeout) {
dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -ETIMEDOUT;
}
timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
transfer_timeout);
if (!timeout) {
dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -ETIMEDOUT;
}
} else {
if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
fsl_lpspi->slave_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA TX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -EINTR;
}
if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
fsl_lpspi->slave_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA RX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -EINTR;
}
}
fsl_lpspi_reset(fsl_lpspi);
return 0;
}
static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
if (controller->dma_rx) {
dma_release_channel(controller->dma_rx);
controller->dma_rx = NULL;
}
if (controller->dma_tx) {
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
}
}
static int fsl_lpspi_dma_init(struct device *dev,
struct fsl_lpspi_data *fsl_lpspi,
struct spi_controller *controller)
{
int ret;
/* Prepare for TX DMA: */
controller->dma_tx = dma_request_slave_channel_reason(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
controller->dma_tx = NULL;
goto err;
}
/* Prepare for RX DMA: */
controller->dma_rx = dma_request_slave_channel_reason(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
controller->dma_rx = NULL;
goto err;
}
init_completion(&fsl_lpspi->dma_rx_completion);
init_completion(&fsl_lpspi->dma_tx_completion);
controller->can_dma = fsl_lpspi_can_dma;
controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
return 0;
err:
fsl_lpspi_dma_exit(controller);
return ret;
}
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
......@@ -402,37 +728,30 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
return 0;
}
static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
struct spi_message *msg)
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
bool is_first_xfer = true;
int ret = 0;
msg->status = 0;
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
fsl_lpspi_setup_transfer(spi, xfer);
fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
is_first_xfer = false;
spi_controller_get_devdata(controller);
int ret;
ret = fsl_lpspi_transfer_one(controller, spi, xfer);
if (ret < 0)
goto complete;
fsl_lpspi->is_first_byte = true;
ret = fsl_lpspi_setup_transfer(controller, spi, t);
if (ret < 0)
return ret;
msg->actual_length += xfer->len;
}
fsl_lpspi_set_cmd(fsl_lpspi);
fsl_lpspi->is_first_byte = false;
complete:
msg->status = ret;
spi_finalize_current_message(controller);
if (fsl_lpspi->usedma)
ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
else
ret = fsl_lpspi_pio_transfer(controller, t);
if (ret < 0)
return ret;
return ret;
return 0;
}
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
......@@ -467,15 +786,67 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct fsl_lpspi_data *fsl_lpspi;
int ret;
fsl_lpspi = spi_controller_get_devdata(controller);
ret = clk_prepare_enable(fsl_lpspi->clk_per);
if (ret)
return ret;
ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
if (ret) {
clk_disable_unprepare(fsl_lpspi->clk_per);
return ret;
}
return 0;
}
static int fsl_lpspi_runtime_suspend(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct fsl_lpspi_data *fsl_lpspi;
fsl_lpspi = spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk_per);
clk_disable_unprepare(fsl_lpspi->clk_ipg);
return 0;
}
#endif
static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
struct device *dev = fsl_lpspi->dev;
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(dev);
return 0;
}
static int fsl_lpspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
struct spi_imx_master *lpspi_platform_info =
dev_get_platdata(&pdev->dev);
struct resource *res;
int ret, irq;
int i, ret, irq;
u32 temp;
bool is_slave;
if (of_property_read_bool((&pdev->dev)->of_node, "spi-slave"))
is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_slave)
controller = spi_alloc_slave(&pdev->dev,
sizeof(struct fsl_lpspi_data));
else
......@@ -487,15 +858,35 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, controller);
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->bus_num = pdev->id;
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = of_property_read_bool((&pdev->dev)->of_node,
"spi-slave");
fsl_lpspi->is_slave = is_slave;
controller->transfer_one_message = fsl_lpspi_transfer_one_msg;
if (!fsl_lpspi->is_slave) {
for (i = 0; i < controller->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
if (!gpio_is_valid(cs_gpio) && lpspi_platform_info)
cs_gpio = lpspi_platform_info->chipselect[i];
fsl_lpspi->chipselect[i] = cs_gpio;
if (!gpio_is_valid(cs_gpio))
continue;
ret = devm_gpio_request(&pdev->dev,
fsl_lpspi->chipselect[i],
DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev, "can't get cs gpios\n");
goto out_controller_put;
}
}
controller->cs_gpios = fsl_lpspi->chipselect;
controller->prepare_message = fsl_lpspi_prepare_message;
}
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->transfer_one = fsl_lpspi_transfer_one;
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
......@@ -512,6 +903,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
}
fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
......@@ -526,23 +918,39 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
goto out_controller_put;
}
fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk)) {
ret = PTR_ERR(fsl_lpspi->clk);
fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(fsl_lpspi->clk_per)) {
ret = PTR_ERR(fsl_lpspi->clk_per);
goto out_controller_put;
}
ret = clk_prepare_enable(fsl_lpspi->clk);
if (ret) {
dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk_ipg)) {
ret = PTR_ERR(fsl_lpspi->clk_ipg);
goto out_controller_put;
}
/* enable the clock */
ret = fsl_lpspi_init_rpm(fsl_lpspi);
if (ret)
goto out_controller_put;
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
return ret;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
clk_disable_unprepare(fsl_lpspi->clk);
ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
if (ret == -EPROBE_DEFER)
goto out_controller_put;
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
......@@ -564,15 +972,50 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk);
pm_runtime_disable(fsl_lpspi->dev);
spi_master_put(controller);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int fsl_lpspi_suspend(struct device *dev)
{
int ret;
pinctrl_pm_select_sleep_state(dev);
ret = pm_runtime_force_suspend(dev);
return ret;
}
static int fsl_lpspi_resume(struct device *dev)
{
int ret;
ret = pm_runtime_force_resume(dev);
if (ret) {
dev_err(dev, "Error in resume: %d\n", ret);
return ret;
}
pinctrl_pm_select_default_state(dev);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
fsl_lpspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};
static struct platform_driver fsl_lpspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fsl_lpspi_dt_ids,
.pm = &fsl_lpspi_pm_ops,
},
.probe = fsl_lpspi_probe,
.remove = fsl_lpspi_remove,
......
......@@ -882,7 +882,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
ret = spi_register_controller(ctlr);
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
......
......@@ -39,6 +39,14 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
#endif
/* Specific to the MPC8306/MPC8309 */
#define IMMR_SPI_CS_OFFSET 0x14c
#define SPI_BOOT_SEL_BIT 0x80000000
#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
#include "spi-fsl-spi.h"
......@@ -355,33 +363,50 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static int fsl_spi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct spi_device *spi = m->spi;
struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
int status, last_bpw;
/*
* In CPU mode, optimize large byte transfers to use larger
* bits_per_word values to reduce the number of interrupts taken.
*/
if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->len < 256 || t->bits_per_word != 8)
continue;
if ((t->len & 3) == 0)
t->bits_per_word = 32;
else if ((t->len & 1) == 0)
t->bits_per_word = 16;
}
}
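For example, a 256-byte transfer queued at 8 bits_per_word is promoted to 32 bits_per_word (64 words instead of 256, so a quarter of the interrupts), a 258-byte transfer is promoted to 16 bits_per_word, and a 257-byte transfer is left at 8.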
/* Don't allow changes if CS is active */
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
cs_change = 1;
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
if (cs_change)
first = t;
cs_change = t->cs_change;
if (first->speed_hz != t->speed_hz) {
dev_err(&spi->dev,
"bits_per_word/speed_hz should be same for the same SPI transfer\n");
"speed_hz cannot change while CS is active\n");
return -EINVAL;
}
}
last_bpw = -1;
cs_change = 1;
status = -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->bits_per_word || t->speed_hz) {
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
break;
}
if (cs_change || last_bpw != t->bits_per_word)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
break;
last_bpw = t->bits_per_word;
if (cs_change) {
fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
......@@ -701,10 +726,17 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
bool alow = pinfo->alow_flags[cs];
gpio_set_value(gpio, on ^ alow);
if (cs < pinfo->ngpios) {
int gpio = pinfo->gpios[cs];
bool alow = pinfo->alow_flags[cs];
gpio_set_value(gpio, on ^ alow);
} else {
if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
return;
iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
}
}
static int of_fsl_spi_get_chipselects(struct device *dev)
......@@ -712,12 +744,15 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
of_property_read_bool(np, "fsl,spisel_boot");
int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
if (ngpios <= 0) {
ngpios = max(ngpios, 0);
if (ngpios == 0 && !spisel_boot) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
......@@ -726,6 +761,7 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
return 0;
}
pinfo->ngpios = ngpios;
pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
GFP_KERNEL);
if (!pinfo->gpios)
......@@ -769,7 +805,18 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
}
}
pdata->max_chipselect = ngpios;
#if IS_ENABLED(CONFIG_FSL_SOC)
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
if (!pinfo->immr_spi_cs) {
ret = -ENOMEM;
i = ngpios - 1;
goto err_loop;
}
}
#endif
pdata->max_chipselect = ngpios + spisel_boot;
pdata->cs_control = fsl_spi_cs_control;
return 0;
......
......@@ -41,13 +41,10 @@
struct spi_gpio {
struct spi_bitbang bitbang;
struct spi_gpio_platform_data pdata;
struct platform_device *pdev;
struct gpio_desc *sck;
struct gpio_desc *miso;
struct gpio_desc *mosi;
struct gpio_desc **cs_gpios;
bool has_cs;
};
/*----------------------------------------------------------------------*/
......@@ -95,12 +92,6 @@ spi_to_spi_gpio(const struct spi_device *spi)
return spi_gpio;
}
static inline struct spi_gpio_platform_data *__pure
spi_to_pdata(const struct spi_device *spi)
{
return &spi_to_spi_gpio(spi)->pdata;
}
/* These helpers are in turn called by the bitbang inlines */
static inline void setsck(const struct spi_device *spi, int is_on)
{
......@@ -223,7 +214,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
/* Drive chip select line, if we have one */
if (spi_gpio->has_cs) {
if (spi_gpio->cs_gpios) {
struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select];
/* SPI chip selects are normally active-low */
......@@ -241,10 +232,12 @@ static int spi_gpio_setup(struct spi_device *spi)
* The CS GPIOs have already been
* initialized from the descriptor lookup.
*/
cs = spi_gpio->cs_gpios[spi->chip_select];
if (!spi->controller_state && cs)
status = gpiod_direction_output(cs,
!(spi->mode & SPI_CS_HIGH));
if (spi_gpio->cs_gpios) {
cs = spi_gpio->cs_gpios[spi->chip_select];
if (!spi->controller_state && cs)
status = gpiod_direction_output(cs,
!(spi->mode & SPI_CS_HIGH));
}
if (!status)
status = spi_bitbang_setup(spi);
......@@ -295,40 +288,20 @@ static void spi_gpio_cleanup(struct spi_device *spi)
* floating signals. (A weak pulldown would save power too, but many
* drivers expect to see all-ones data as the no slave "response".)
*/
static int spi_gpio_request(struct device *dev,
struct spi_gpio *spi_gpio,
unsigned int num_chipselects,
u16 *mflags)
static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
{
int i;
spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->mosi))
return PTR_ERR(spi_gpio->mosi);
if (!spi_gpio->mosi)
/* HW configuration without MOSI pin */
*mflags |= SPI_MASTER_NO_TX;
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
/*
* No setting SPI_MASTER_NO_RX here - if there is only a MOSI
* pin connected the host can still do RX by changing the
* direction of the line.
*/
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->sck))
return PTR_ERR(spi_gpio->sck);
for (i = 0; i < num_chipselects; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
i, GPIOD_OUT_HIGH);
if (IS_ERR(spi_gpio->cs_gpios[i]))
return PTR_ERR(spi_gpio->cs_gpios[i]);
}
return 0;
}
......@@ -339,142 +312,134 @@ static const struct of_device_id spi_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
static int spi_gpio_probe_dt(struct platform_device *pdev)
static int spi_gpio_probe_dt(struct platform_device *pdev,
struct spi_master *master)
{
int ret;
u32 tmp;
struct spi_gpio_platform_data *pdata;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(spi_gpio_dt_ids, &pdev->dev);
if (!of_id)
return 0;
master->dev.of_node = pdev->dev.of_node;
master->use_gpio_descriptors = true;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
return 0;
}
#else
static inline int spi_gpio_probe_dt(struct platform_device *pdev,
struct spi_master *master)
{
return 0;
}
#endif
static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_master *master)
{
struct device *dev = &pdev->dev;
struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
struct spi_gpio *spi_gpio = spi_master_get_devdata(master);
int i;
ret = of_property_read_u32(np, "num-chipselects", &tmp);
if (ret < 0) {
dev_err(&pdev->dev, "num-chipselects property not found\n");
goto error_free;
}
#ifdef GENERIC_BITBANG
if (!pdata || !pdata->num_chipselect)
return -ENODEV;
#endif
/*
* The master needs to think there is a chipselect even if not
* connected
*/
master->num_chipselect = pdata->num_chipselect ?: 1;
pdata->num_chipselect = tmp;
pdev->dev.platform_data = pdata;
spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
if (!spi_gpio->cs_gpios)
return -ENOMEM;
return 1;
for (i = 0; i < master->num_chipselect; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
GPIOD_OUT_HIGH);
if (IS_ERR(spi_gpio->cs_gpios[i]))
return PTR_ERR(spi_gpio->cs_gpios[i]);
}
error_free:
devm_kfree(&pdev->dev, pdata);
return ret;
return 0;
}
#else
static inline int spi_gpio_probe_dt(struct platform_device *pdev)
static void spi_gpio_put(void *data)
{
return 0;
spi_master_put(data);
}
#endif
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
u16 master_flags = 0;
bool use_of = 0;
struct device *dev = &pdev->dev;
struct spi_bitbang *bb;
const struct of_device_id *of_id;
status = spi_gpio_probe_dt(pdev);
if (status < 0)
return status;
if (status > 0)
use_of = 1;
pdata = dev_get_platdata(&pdev->dev);
#ifdef GENERIC_BITBANG
if (!pdata || (!use_of && !pdata->num_chipselect))
return -ENODEV;
#endif
of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio));
master = spi_alloc_master(dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
spi_gpio = spi_master_get_devdata(master);
spi_gpio->cs_gpios = devm_kcalloc(&pdev->dev,
pdata->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
if (!spi_gpio->cs_gpios)
return -ENOMEM;
status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
if (status)
return status;
platform_set_drvdata(pdev, spi_gpio);
if (of_id)
status = spi_gpio_probe_dt(pdev, master);
else
status = spi_gpio_probe_pdata(pdev, master);
/* Determine if we have chip selects connected */
spi_gpio->has_cs = !!pdata->num_chipselect;
if (status)
return status;
spi_gpio->pdev = pdev;
if (pdata)
spi_gpio->pdata = *pdata;
spi_gpio = spi_master_get_devdata(master);
status = spi_gpio_request(&pdev->dev, spi_gpio,
pdata->num_chipselect, &master_flags);
status = spi_gpio_request(dev, spi_gpio);
if (status)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
SPI_CS_HIGH;
master->flags = master_flags;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
* No setting SPI_MASTER_NO_RX here - if there is only
* a MOSI pin connected the host can still do RX by
* changing the direction of the line.
*/
master->flags = SPI_MASTER_NO_TX;
}
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
master->num_chipselect = spi_gpio->has_cs ? pdata->num_chipselect : 1;
master->setup = spi_gpio_setup;
master->cleanup = spi_gpio_cleanup;
#ifdef CONFIG_OF
master->dev.of_node = pdev->dev.of_node;
#endif
spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
bb = &spi_gpio->bitbang;
bb->master = master;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
if ((master_flags & SPI_MASTER_NO_TX) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
if (master->flags & SPI_MASTER_NO_TX) {
bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
} else {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
}
spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
bb->setup_transfer = spi_bitbang_setup_transfer;
status = spi_bitbang_start(&spi_gpio->bitbang);
status = spi_bitbang_init(&spi_gpio->bitbang);
if (status)
spi_master_put(master);
return status;
}
static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
spi_gpio = platform_get_drvdata(pdev);
/* stop() unregisters child devices too */
spi_bitbang_stop(&spi_gpio->bitbang);
spi_master_put(spi_gpio->bitbang.master);
return status;
return 0;
return devm_spi_register_master(&pdev->dev, spi_master_get(master));
}
MODULE_ALIAS("platform:" DRIVER_NAME);
......@@ -485,7 +450,6 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
.remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
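The spi-gpio probe rework above hinges on the devm pattern sketched below; this is a generic illustration (all example_* names are invented), not the driver's code. Tying the initial controller reference to a devm action and registering through devm_spi_register_master() is what lets the driver drop its .remove callback. Assumes <linux/platform_device.h> and <linux/spi/spi.h>.

static void example_put_master(void *data)
{
	spi_master_put(data);	/* drop the reference taken by spi_alloc_master() */
}

static int example_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* Release the controller automatically on probe failure or unbind. */
	ret = devm_add_action_or_reset(&pdev->dev, example_put_master, master);
	if (ret)
		return ret;

	/* ... request GPIOs, fill in bus_num, num_chipselect, hooks ... */

	/* devm unregisters on unbind; the extra get balances the put action. */
	return devm_spi_register_master(&pdev->dev, spi_master_get(master));
}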
......
......@@ -28,6 +28,10 @@
#define DRIVER_NAME "spi_imx"
static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
......@@ -219,6 +223,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
if (!use_dma)
return false;
if (!master->dma_rx)
return false;
......
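For reference, the use_dma switch added to the i.MX driver above is an ordinary module parameter with 0644 permissions, so (assuming the module keeps its spi_imx name) it can be disabled by loading the module with use_dma=0, by booting with spi_imx.use_dma=0 when the driver is built in, or by writing 0 to /sys/module/spi_imx/parameters/use_dma at run time.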
......@@ -135,8 +135,8 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return -ENOTSUPP;
}
static bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
......@@ -622,7 +622,7 @@ void devm_spi_mem_dirmap_destroy(struct device *dev,
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
* spi_mem_dirmap_dirmap_read() - Read data through a direct mapping
* spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start reading from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
......@@ -668,7 +668,7 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
/**
* spi_mem_dirmap_dirmap_write() - Write data through a direct mapping
* spi_mem_dirmap_write() - Write data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start writing from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
......
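Since spi_mem_default_supports_op() is no longer static (spi-mem hunk above), a controller driver can call it as the baseline buswidth check and layer its own restriction on top. A hypothetical sketch (the example_* names and the 4-byte dummy limit are invented):

#include <linux/spi/spi-mem.h>

static bool example_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	/* Reuse the generic buswidth/mode checks first. */
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	/* Then apply a controller-specific constraint, e.g. on dummy bytes. */
	return op->dummy.nbytes <= 4;
}

static const struct spi_controller_mem_ops example_mem_ops = {
	.supports_op = example_supports_op,
	/* .exec_op and friends omitted */
};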
// SPDX-License-Identifier: GPL-2.0
/*
* spi-mt7621.c -- MediaTek MT7621 SPI controller driver
*
* Copyright (C) 2011 Sergiy <piratfm@gmail.com>
* Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
*
* Some parts are based on spi-orion.c:
* Author: Shadi Ammouri <shadi@marvell.com>
* Copyright (C) 2007-2008 Marvell Ltd.
*/
//
// spi-mt7621.c -- MediaTek MT7621 SPI controller driver
//
// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
//
// Some parts are based on spi-orion.c:
// Author: Shadi Ammouri <shadi@marvell.com>
// Copyright (C) 2007-2008 Marvell Ltd.
#include <linux/clk.h>
#include <linux/delay.h>
......@@ -52,7 +51,7 @@
#define MT7621_LSB_FIRST BIT(3)
struct mt7621_spi {
struct spi_master *master;
struct spi_controller *master;
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
......@@ -64,7 +63,7 @@ struct mt7621_spi {
static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
{
return spi_master_get_devdata(spi->master);
return spi_controller_get_devdata(spi->master);
}
static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
......@@ -77,29 +76,25 @@ static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
iowrite32(val, rs->base + reg);
}
static void mt7621_spi_reset(struct mt7621_spi *rs)
static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
{
u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
int cs = spi->chip_select;
u32 polar = 0;
u32 master;
/*
* Select SPI device 7, enable "more buffer mode" and disable
* full-duplex (only half-duplex really works on this chip
* reliably)
*/
master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
master &= ~MASTER_FULL_DUPLEX;
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
rs->pending_write = 0;
}
static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
{
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
int cs = spi->chip_select;
u32 polar = 0;
rs->pending_write = 0;
mt7621_spi_reset(rs);
if (enable)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
......@@ -163,13 +158,14 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
int tx_len;
/*
* Combine with any pending write, and perform one or more half-duplex
* transactions reading 'len' bytes. Data to be written is already in
* MT7621_SPI_DATA.
*/
int tx_len = rs->pending_write;
tx_len = rs->pending_write;
rs->pending_write = 0;
while (rx_len || tx_len) {
......@@ -209,8 +205,8 @@ static inline void mt7621_spi_flush(struct mt7621_spi *rs)
static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
int tx_len, const u8 *buf)
{
int val = 0;
int len = rs->pending_write;
int val = 0;
if (len & 3) {
val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
......@@ -238,6 +234,7 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
tx_len -= 1;
}
if (len & 3) {
if (len < 4) {
val = swab32(val);
......@@ -245,13 +242,14 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
}
rs->pending_write = len;
}
static int mt7621_spi_transfer_one_message(struct spi_master *master,
static int mt7621_spi_transfer_one_message(struct spi_controller *master,
struct spi_message *m)
{
struct mt7621_spi *rs = spi_master_get_devdata(master);
struct mt7621_spi *rs = spi_controller_get_devdata(master);
struct spi_device *spi = m->spi;
unsigned int speed = spi->max_speed_hz;
struct spi_transfer *t = NULL;
......@@ -268,11 +266,14 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
}
/* Assert CS */
mt7621_spi_set_cs(spi, 1);
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((t->rx_buf) && (t->tx_buf)) {
/* This controller will shift some extra data out
/*
* This controller will shift some extra data out
* of spi_opcode if (mosi_bit_cnt > 0) &&
* (cmd_bit_cnt == 0). So the claimed full-duplex
* support is broken since we have no way to read
......@@ -287,8 +288,9 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
}
m->actual_length += t->len;
}
mt7621_spi_flush(rs);
/* Flush data and deassert CS */
mt7621_spi_flush(rs);
mt7621_spi_set_cs(spi, 0);
msg_done:
......@@ -303,7 +305,7 @@ static int mt7621_spi_setup(struct spi_device *spi)
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (rs->sys_freq / 2)))
(spi->max_speed_hz > (rs->sys_freq / 2)))
spi->max_speed_hz = (rs->sys_freq / 2);
if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
......@@ -324,7 +326,7 @@ MODULE_DEVICE_TABLE(of, mt7621_spi_match);
static int mt7621_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct spi_master *master;
struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
struct resource *r;
......@@ -361,7 +363,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
}
master->mode_bits = SPI_LSB_FIRST;
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = mt7621_spi_setup;
master->transfer_one_message = mt7621_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
......@@ -370,7 +372,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, master);
rs = spi_master_get_devdata(master);
rs = spi_controller_get_devdata(master);
rs->base = base;
rs->clk = clk;
rs->master = master;
......@@ -385,21 +387,18 @@ static int mt7621_spi_probe(struct platform_device *pdev)
return ret;
}
mt7621_spi_reset(rs);
return spi_register_master(master);
return devm_spi_register_controller(&pdev->dev, master);
}
static int mt7621_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct spi_controller *master;
struct mt7621_spi *rs;
master = dev_get_drvdata(&pdev->dev);
rs = spi_master_get_devdata(master);
rs = spi_controller_get_devdata(master);
clk_disable(rs->clk);
spi_unregister_master(master);
clk_disable_unprepare(rs->clk);
return 0;
}
......
......@@ -492,8 +492,7 @@ static int mxic_spi_transfer_one(struct spi_master *master,
static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
mxic_spi_clk_disable(mxic);
......@@ -504,8 +503,7 @@ static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
int ret;
......
......@@ -470,6 +470,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
if (xfer->word_delay_usecs)
udelay(xfer->word_delay_usecs);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
......@@ -479,6 +481,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
if (xfer->word_delay_usecs)
udelay(xfer->word_delay_usecs);
} while (count);
}
......
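The per-word gap honoured by the Orion loop above is requested from the peripheral-driver side through the transfer structure; a hedged client-side sketch (word_delay_usecs is the field name used by this kernel version, all other names are placeholders):

static int example_send(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
		.word_delay_usecs = 5,	/* 5 us pause after each word */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);
	return spi_sync(spi, &msg);
}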
......@@ -239,13 +239,15 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info = spi->controller_data;
struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
u32 dma_burst_size = drv_data->controller_info->dma_burst_size;
/*
* If the DMA burst size is given in chip_info we use that,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
*burst_code = chip_info ? chip_info->dma_burst_size : 1;
*burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
......
......@@ -5,7 +5,6 @@
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
......@@ -35,6 +34,8 @@ struct pxa_spi_info {
void *tx_param;
void *rx_param;
int dma_burst_size;
int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
};
......@@ -133,6 +134,7 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
rx->dma_dev = &dma_dev->dev;
c->dma_filter = lpss_dma_filter;
c->dma_burst_size = 8;
return 0;
}
......@@ -223,6 +225,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.tx_param = c->tx_param;
spi_pdata.rx_param = c->rx_param;
spi_pdata.enable_dma = c->rx_param && c->tx_param;
spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
......
......@@ -929,7 +929,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(message->spi);
struct chip_data *chip = spi_get_ctldata(spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
......@@ -947,21 +947,21 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&drv_data->pdev->dev,
dev_err(&spi->dev,
"Mapped transfer length of %u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
return -EINVAL;
}
/* warn ... we force this to PIO mode */
dev_warn_ratelimited(&message->spi->dev,
dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %ld greater than %d\n",
(long)transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
if (pxa2xx_spi_flush(drv_data) == 0) {
dev_err(&drv_data->pdev->dev, "Flush failed\n");
dev_err(&spi->dev, "Flush failed\n");
return -EIO;
}
drv_data->n_bytes = chip->n_bytes;
......@@ -1003,15 +1003,15 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
message->spi,
spi,
bits, &dma_burst,
&dma_thresh))
dev_warn_ratelimited(&message->spi->dev,
dev_warn_ratelimited(&spi->dev,
"DMA burst size reduced to match bits_per_word\n");
}
dma_mapped = controller->can_dma &&
controller->can_dma(controller, message->spi, transfer) &&
controller->can_dma(controller, spi, transfer) &&
controller->cur_msg_mapped;
if (dma_mapped) {
......@@ -1039,12 +1039,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
......@@ -1337,6 +1337,9 @@ static int setup(struct spi_device *spi)
dev_warn(&spi->dev,
"in setup: DMA burst size reduced to match bits_per_word\n");
}
dev_dbg(&spi->dev,
"in setup: DMA burst size set to %u\n",
chip->dma_burst_size);
}
switch (drv_data->ssp_type) {
......@@ -1455,6 +1458,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
{ },
};
......@@ -1568,6 +1575,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
return pdata;
}
......@@ -1696,7 +1704,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (platform_info->enable_dma) {
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
dev_dbg(dev, "no DMA channels available, using PIO\n");
dev_warn(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
controller->can_dma = pxa2xx_spi_can_dma;
......@@ -1957,3 +1965,5 @@ static void __exit pxa2xx_spi_exit(void)
platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
MODULE_SOFTDEP("pre: dw_dmac");
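A note on the soft dependency just added: MODULE_SOFTDEP("pre: dw_dmac") only records a hint in the module's modinfo so that modprobe loads the DesignWare DMA engine driver first; it creates no symbol-level dependency, and when no DMA channel turns up the driver still falls back to PIO, as the dev_warn change above makes more visible.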
......@@ -18,6 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
......@@ -82,111 +83,113 @@ struct sh_msiof_spi_priv {
#define RFDR 0x60 /* Receive FIFO Data Register */
/* TMDR1 and RMDR1 */
#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
/* TSCR and RSCR */
#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
#define SCR_BRPS(i) (((i) - 1) << 8)
#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
#define SCR_BRDV_DIV_2 0x0000
#define SCR_BRDV_DIV_4 0x0001
#define SCR_BRDV_DIV_8 0x0002
#define SCR_BRDV_DIV_16 0x0003
#define SCR_BRDV_DIV_32 0x0004
#define SCR_BRDV_DIV_1 0x0007
#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
#define SCR_BRDV_DIV_2 0
#define SCR_BRDV_DIV_4 1
#define SCR_BRDV_DIV_8 2
#define SCR_BRDV_DIV_16 3
#define SCR_BRDV_DIV_32 4
#define SCR_BRDV_DIV_1 7
/* CTR */
#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
#define CTR_TXE 0x00000200 /* Transmit Enable */
#define CTR_RXE 0x00000100 /* Receive Enable */
#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
#define CTR_TXE BIT(9) /* Transmit Enable */
#define CTR_RXE BIT(8) /* Receive Enable */
#define CTR_TXRST BIT(1) /* Transmit Reset */
#define CTR_RXRST BIT(0) /* Receive Reset */
/* FCTR */
#define FCTR_TFWM_MASK 0xe0000000 /* Transmit FIFO Watermark */
#define FCTR_TFWM_64 0x00000000 /* Transfer Request when 64 empty stages */
#define FCTR_TFWM_32 0x20000000 /* Transfer Request when 32 empty stages */
#define FCTR_TFWM_24 0x40000000 /* Transfer Request when 24 empty stages */
#define FCTR_TFWM_16 0x60000000 /* Transfer Request when 16 empty stages */
#define FCTR_TFWM_12 0x80000000 /* Transfer Request when 12 empty stages */
#define FCTR_TFWM_8 0xa0000000 /* Transfer Request when 8 empty stages */
#define FCTR_TFWM_4 0xc0000000 /* Transfer Request when 4 empty stages */
#define FCTR_TFWM_1 0xe0000000 /* Transfer Request when 1 empty stage */
#define FCTR_TFUA_MASK 0x07f00000 /* Transmit FIFO Usable Area */
#define FCTR_TFUA_SHIFT 20
#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define FCTR_TFUA_SHIFT 20
#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
#define FCTR_RFWM_MASK 0x0000e000 /* Receive FIFO Watermark */
#define FCTR_RFWM_1 0x00000000 /* Transfer Request when 1 valid stages */
#define FCTR_RFWM_4 0x00002000 /* Transfer Request when 4 valid stages */
#define FCTR_RFWM_8 0x00004000 /* Transfer Request when 8 valid stages */
#define FCTR_RFWM_16 0x00006000 /* Transfer Request when 16 valid stages */
#define FCTR_RFWM_32 0x00008000 /* Transfer Request when 32 valid stages */
#define FCTR_RFWM_64 0x0000a000 /* Transfer Request when 64 valid stages */
#define FCTR_RFWM_128 0x0000c000 /* Transfer Request when 128 valid stages */
#define FCTR_RFWM_256 0x0000e000 /* Transfer Request when 256 valid stages */
#define FCTR_RFUA_MASK 0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
#define FCTR_RFUA_SHIFT 4
#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
#define FCTR_RFUA_SHIFT 4
#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
/* STR */
#define STR_TFEMP 0x20000000 /* Transmit FIFO Empty */
#define STR_TDREQ 0x10000000 /* Transmit Data Transfer Request */
#define STR_TEOF 0x00800000 /* Frame Transmission End */
#define STR_TFSERR 0x00200000 /* Transmit Frame Synchronization Error */
#define STR_TFOVF 0x00100000 /* Transmit FIFO Overflow */
#define STR_TFUDF 0x00080000 /* Transmit FIFO Underflow */
#define STR_RFFUL 0x00002000 /* Receive FIFO Full */
#define STR_RDREQ 0x00001000 /* Receive Data Transfer Request */
#define STR_REOF 0x00000080 /* Frame Reception End */
#define STR_RFSERR 0x00000020 /* Receive Frame Synchronization Error */
#define STR_RFUDF 0x00000010 /* Receive FIFO Underflow */
#define STR_RFOVF 0x00000008 /* Receive FIFO Overflow */
#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
#define STR_TEOF BIT(23) /* Frame Transmission End */
#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
#define STR_RFFUL BIT(13) /* Receive FIFO Full */
#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
#define STR_REOF BIT(7) /* Frame Reception End */
#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
/* IER */
#define IER_TDMAE 0x80000000 /* Transmit Data DMA Transfer Req. Enable */
#define IER_TFEMPE 0x20000000 /* Transmit FIFO Empty Enable */
#define IER_TDREQE 0x10000000 /* Transmit Data Transfer Request Enable */
#define IER_TEOFE 0x00800000 /* Frame Transmission End Enable */
#define IER_TFSERRE 0x00200000 /* Transmit Frame Sync Error Enable */
#define IER_TFOVFE 0x00100000 /* Transmit FIFO Overflow Enable */
#define IER_TFUDFE 0x00080000 /* Transmit FIFO Underflow Enable */
#define IER_RDMAE 0x00008000 /* Receive Data DMA Transfer Req. Enable */
#define IER_RFFULE 0x00002000 /* Receive FIFO Full Enable */
#define IER_RDREQE 0x00001000 /* Receive Data Transfer Request Enable */
#define IER_REOFE 0x00000080 /* Frame Reception End Enable */
#define IER_RFSERRE 0x00000020 /* Receive Frame Sync Error Enable */
#define IER_RFUDFE 0x00000010 /* Receive FIFO Underflow Enable */
#define IER_RFOVFE 0x00000008 /* Receive FIFO Overflow Enable */
#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
#define IER_REOFE BIT(7) /* Frame Reception End Enable */
#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
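With the MSIOF register layout above now expressed through BIT()/GENMASK(), the fields can also be built with the generic bitfield helpers; purely as an illustration (the driver itself keeps using its SCR_BRPS() macro), a prescaler of 8 on the divide-by-2 base could be encoded as:

	#include <linux/bitfield.h>

	u32 scr = FIELD_PREP(SCR_BRPS_MASK, 8 - 1) |	/* same value as SCR_BRPS(8) */
		  FIELD_PREP(SCR_BRDV_MASK, SCR_BRDV_DIV_2);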
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
......@@ -219,21 +222,14 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
{
u32 mask = clr | set;
u32 data;
int k;
data = sh_msiof_read(p, CTR);
data &= ~clr;
data |= set;
sh_msiof_write(p, CTR, data);
for (k = 100; k > 0; k--) {
if ((sh_msiof_read(p, CTR) & mask) == set)
break;
udelay(10);
}
return k > 0 ? 0 : -ETIMEDOUT;
return readl_poll_timeout_atomic(p->mapbase + CTR, data,
(data & mask) == set, 10, 1000);
}
static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
......@@ -247,6 +243,19 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
return IRQ_HANDLED;
}
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
u32 mask = CTR_TXRST | CTR_RXRST;
u32 data;
data = sh_msiof_read(p, CTR);
data |= mask;
sh_msiof_write(p, CTR, data);
readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
100);
}
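Both helpers above now delegate the register polling to readl_poll_timeout_atomic() from <linux/iopoll.h>. Its general shape, shown on a made-up register and mask:

	u32 val;
	int ret;

	/* Re-read the register into 'val' until the condition holds, waiting
	 * 10 us between reads and giving up after 1000 us in total; returns
	 * 0 on success or -ETIMEDOUT. */
	ret = readl_poll_timeout_atomic(base + EXAMPLE_REG, val,
					(val & EXAMPLE_MASK) == EXAMPLE_MASK,
					10, 1000);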
static const u32 sh_msiof_spi_div_array[] = {
SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
......@@ -540,25 +549,11 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->controller->dev.of_node;
struct sh_msiof_spi_priv *p =
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
if (!np) {
/*
* Use spi->controller_data for CS (same strategy as spi_gpio),
* if any. otherwise let HW control CS
*/
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
if (gpio_is_valid(spi->cs_gpio)) {
gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
return 0;
}
if (spi_controller_is_slave(p->ctlr))
if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
......@@ -591,7 +586,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
u32 ss, cs_high;
/* Configure pins before asserting CS */
if (gpio_is_valid(spi->cs_gpio)) {
if (spi->cs_gpiod) {
ss = p->unused_ss;
cs_high = p->native_cs_high;
} else {
......@@ -926,6 +921,9 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
bool swab;
int ret;
/* reset registers */
sh_msiof_spi_reset_regs(p);
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
......@@ -1144,6 +1142,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
if (!IS_ERR(gpiod)) {
devm_gpiod_put(dev, gpiod);
cs_gpios++;
continue;
}
......@@ -1395,6 +1394,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
ret = sh_msiof_request_dma(p);
if (ret < 0)
......
......@@ -5,6 +5,8 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
......@@ -13,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
......@@ -83,6 +86,7 @@
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
......@@ -92,6 +96,8 @@ struct stm32_qspi_flash {
struct stm32_qspi {
struct device *dev;
struct spi_controller *ctrl;
phys_addr_t phys_base;
void __iomem *io_base;
void __iomem *mm_base;
resource_size_t mm_size;
......@@ -101,6 +107,13 @@ struct stm32_qspi {
struct completion data_completion;
u32 fmode;
struct dma_chan *dma_chtx;
struct dma_chan *dma_chrx;
struct completion dma_completion;
u32 cr_reg;
u32 dcr_reg;
/*
* to protect device configuration, could be different between
* 2 flash access (bk1, bk2)
......@@ -176,6 +189,81 @@ static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
return 0;
}
static void stm32_qspi_dma_callback(void *arg)
{
struct completion *dma_completion = arg;
complete(dma_completion);
}
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
struct dma_async_tx_descriptor *desc;
enum dma_transfer_direction dma_dir;
struct dma_chan *dma_ch;
struct sg_table sgt;
dma_cookie_t cookie;
u32 cr, t_out;
int err;
if (op->data.dir == SPI_MEM_DATA_IN) {
dma_dir = DMA_DEV_TO_MEM;
dma_ch = qspi->dma_chrx;
} else {
dma_dir = DMA_MEM_TO_DEV;
dma_ch = qspi->dma_chtx;
}
/*
* spi_map_buf() returns -EINVAL if the buffer is not DMA-able
* (DMA-able: in vmalloc | kmap | virt_addr_valid)
*/
err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
if (err)
return err;
desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
dma_dir, DMA_PREP_INTERRUPT);
if (!desc) {
err = -ENOMEM;
goto out_unmap;
}
cr = readl_relaxed(qspi->io_base + QSPI_CR);
reinit_completion(&qspi->dma_completion);
desc->callback = stm32_qspi_dma_callback;
desc->callback_param = &qspi->dma_completion;
cookie = dmaengine_submit(desc);
err = dma_submit_error(cookie);
if (err)
goto out;
dma_async_issue_pending(dma_ch);
writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
if (!wait_for_completion_interruptible_timeout(&qspi->dma_completion,
msecs_to_jiffies(t_out)))
err = -ETIMEDOUT;
if (dma_async_is_tx_complete(dma_ch, cookie,
NULL, NULL) != DMA_COMPLETE)
err = -ETIMEDOUT;
if (err)
dmaengine_terminate_all(dma_ch);
out:
writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
return err;
}
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
if (!op->data.nbytes)
......@@ -183,6 +271,10 @@ static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
if (qspi->fmode == CCR_FMODE_MM)
return stm32_qspi_tx_mm(qspi, op);
else if ((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
(op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx))
if (!stm32_qspi_tx_dma(qspi, op))
return 0;
return stm32_qspi_tx_poll(qspi, op);
}
......@@ -213,7 +305,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
msecs_to_jiffies(1000))) {
msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
err = -ETIMEDOUT;
} else {
sr = readl_relaxed(qspi->io_base + QSPI_SR);
......@@ -355,7 +447,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 cr, presc;
u32 presc;
if (ctrl->busy)
return -EBUSY;
......@@ -371,16 +463,60 @@ static int stm32_qspi_setup(struct spi_device *spi)
flash->presc = presc;
mutex_lock(&qspi->lock);
cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_SSHIFT | CR_EN;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
qspi->cr_reg = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_SSHIFT | CR_EN;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
qspi->dcr_reg = DCR_FSIZE_MASK;
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
return 0;
}
static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
memset(&dma_cfg, 0, sizeof(dma_cfg));
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
if (qspi->dma_chrx) {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
qspi->dma_chrx = NULL;
}
}
qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
if (qspi->dma_chtx) {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
qspi->dma_chtx = NULL;
}
}
init_completion(&qspi->dma_completion);
}
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
if (qspi->dma_chtx)
dma_release_channel(qspi->dma_chtx);
if (qspi->dma_chrx)
dma_release_channel(qspi->dma_chrx);
}
/*
* no special host constraint, so use default spi_mem_default_supports_op
* to check supported mode.
......@@ -393,8 +529,10 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
{
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
spi_master_put(qspi->ctrl);
}
static int stm32_qspi_probe(struct platform_device *pdev)
......@@ -411,43 +549,62 @@ static int stm32_qspi_probe(struct platform_device *pdev)
return -ENOMEM;
qspi = spi_controller_get_devdata(ctrl);
qspi->ctrl = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->io_base))
return PTR_ERR(qspi->io_base);
if (IS_ERR(qspi->io_base)) {
ret = PTR_ERR(qspi->io_base);
goto err;
}
qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->mm_base))
return PTR_ERR(qspi->mm_base);
if (IS_ERR(qspi->mm_base)) {
ret = PTR_ERR(qspi->mm_base);
goto err;
}
qspi->mm_size = resource_size(res);
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
return -EINVAL;
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
ret = -EINVAL;
goto err;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
if (irq != -EPROBE_DEFER)
dev_err(dev, "IRQ error missing or invalid\n");
return irq;
}
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
goto err;
}
init_completion(&qspi->data_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk))
return PTR_ERR(qspi->clk);
if (IS_ERR(qspi->clk)) {
ret = PTR_ERR(qspi->clk);
goto err;
}
qspi->clk_rate = clk_get_rate(qspi->clk);
if (!qspi->clk_rate)
return -EINVAL;
if (!qspi->clk_rate) {
ret = -EINVAL;
goto err;
}
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
goto err;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
......@@ -459,6 +616,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
stm32_qspi_dma_setup(qspi);
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
......@@ -470,14 +628,11 @@ static int stm32_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_master(dev, ctrl);
if (ret)
goto err_spi_register;
return 0;
if (!ret)
return 0;
err_spi_register:
err:
stm32_qspi_release(qspi);
return ret;
}
......@@ -489,6 +644,31 @@ static int stm32_qspi_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
clk_disable_unprepare(qspi->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
clk_prepare_enable(qspi->clk);
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
return 0;
}
static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume);
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
{}
......@@ -501,6 +681,7 @@ static struct platform_driver stm32_qspi_driver = {
.driver = {
.name = "stm32-qspi",
.of_match_table = stm32_qspi_match,
.pm = &stm32_qspi_pm_ops,
},
};
module_platform_driver(stm32_qspi_driver);
......
......@@ -149,6 +149,8 @@
#define SPI_TX_FIFO 0x108
#define SPI_RX_FIFO 0x188
#define SPI_INTR_MASK 0x18c
#define SPI_INTR_ALL_MASK (0x1fUL << 25)
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 64
#define DATA_DIR_TX (1 << 0)
......@@ -161,6 +163,10 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
struct tegra_spi_soc_data {
bool has_intr_mask_reg;
};
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
......@@ -211,6 +217,7 @@ struct tegra_spi_data {
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
const struct tegra_spi_soc_data *soc_data;
};
static int tegra_spi_runtime_suspend(struct device *dev);
......@@ -259,7 +266,8 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
tspi->is_packed = 1;
tspi->words_per_32bit = 32/bits_per_word;
} else {
......@@ -553,11 +561,13 @@ static int tegra_spi_start_dma_based_transfer(
dma_burst = 8;
}
if (tspi->cur_direction & DATA_DIR_TX)
val |= SPI_IE_TX;
if (!tspi->soc_data->has_intr_mask_reg) {
if (tspi->cur_direction & DATA_DIR_TX)
val |= SPI_IE_TX;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SPI_IE_RX;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SPI_IE_RX;
}
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
tspi->dma_control_reg = val;
......@@ -749,6 +759,16 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
if (spi->mode & SPI_LSB_FIRST)
command1 |= SPI_LSBIT_FE;
else
command1 &= ~SPI_LSBIT_FE;
if (spi->mode & SPI_3WIRE)
command1 |= SPI_BIDIROE;
else
command1 &= ~SPI_BIDIROE;
if (tspi->cs_control) {
if (tspi->cs_control != spi)
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
......@@ -781,6 +801,11 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
command1 |= SPI_BOTH_EN_BIT;
else
command1 &= ~SPI_BOTH_EN_BIT;
if (tspi->is_packed)
command1 |= SPI_PACKED;
else
......@@ -832,6 +857,12 @@ static int tegra_spi_setup(struct spi_device *spi)
return ret;
}
if (tspi->soc_data->has_intr_mask_reg) {
val = tegra_spi_readl(tspi, SPI_INTR_MASK);
val &= ~SPI_INTR_ALL_MASK;
tegra_spi_writel(tspi, val, SPI_INTR_MASK);
}
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
......@@ -870,6 +901,20 @@ static void tegra_spi_transfer_end(struct spi_device *spi)
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
}
static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
{
dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
tegra_spi_readl(tspi, SPI_COMMAND1),
tegra_spi_readl(tspi, SPI_COMMAND2));
dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
tegra_spi_readl(tspi, SPI_DMA_CTL),
tegra_spi_readl(tspi, SPI_DMA_BLK));
dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
tegra_spi_readl(tspi, SPI_TRANS_STATUS),
tegra_spi_readl(tspi, SPI_FIFO_STATUS));
}
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
......@@ -916,6 +961,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
(tspi->cur_direction & DATA_DIR_RX))
dmaengine_terminate_all(tspi->rx_dma_chan);
ret = -EIO;
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
reset_control_assert(tspi->rst);
udelay(2);
......@@ -926,6 +972,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
tegra_spi_dump_regs(tspi);
goto complete_xfer;
}
msg->actual_length += xfer->len;
......@@ -967,6 +1014,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
......@@ -1041,6 +1089,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
......@@ -1102,8 +1151,29 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
static struct tegra_spi_soc_data tegra114_spi_soc_data = {
.has_intr_mask_reg = false,
};
static struct tegra_spi_soc_data tegra124_spi_soc_data = {
.has_intr_mask_reg = false,
};
static struct tegra_spi_soc_data tegra210_spi_soc_data = {
.has_intr_mask_reg = true,
};
static const struct of_device_id tegra_spi_of_match[] = {
{ .compatible = "nvidia,tegra114-spi", },
{
.compatible = "nvidia,tegra114-spi",
.data = &tegra114_spi_soc_data,
}, {
.compatible = "nvidia,tegra124-spi",
.data = &tegra124_spi_soc_data,
}, {
.compatible = "nvidia,tegra210-spi",
.data = &tegra210_spi_soc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
......@@ -1114,6 +1184,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
int bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
......@@ -1128,16 +1199,28 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
master->bus_num = bus_num;
tspi->master = master;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
tspi->soc_data = of_device_get_match_data(&pdev->dev);
if (!tspi->soc_data) {
dev_err(&pdev->dev, "unsupported tegra\n");
ret = -ENODEV;
goto exit_free_master;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tspi->base)) {
......
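The widened Tegra mode_bits above (SPI_LSB_FIRST, SPI_TX_DUAL/SPI_RX_DUAL, SPI_3WIRE) are opted into per transfer by the client driver; a hedged client-side sketch, assuming the slave's device-tree node also carries spi-rx-bus-width = <2> so the core accepts dual mode:

static int example_dual_read(struct spi_device *spi, void *buf, size_t len)
{
	struct spi_transfer t = {
		.rx_buf = buf,
		.len = len,
		.rx_nbits = SPI_NBITS_DUAL,	/* maps to SPI_BOTH_EN_BIT above */
	};

	return spi_sync_transfer(spi, &t, 1);
}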
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Xilinx, Inc.
*
* Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/spi/spi-mem.h>
/* Register offset definitions */
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Reg, WO */
#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Reg,RO */
#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst, WO */
#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Reg, RW */
#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Reg, RW */
#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0 /* Linear Adapter Config Ref, RW */
#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */
/*
* QSPI Configuration Register bit Masks
*
* This register contains various control bits that affect the operation
* of the QSPI controller
*/
#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
/*
* QSPI Configuration Register - Baud rate and slave select
*
* These are the values used in the calculation of baud rate divisor and
* setting the slave select.
*/
#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
/*
* QSPI Interrupt Registers bit Masks
*
* All the four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2) /* QSPI TX FIFO Not Full */
#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
ZYNQ_QSPI_IXR_TXNFULL_MASK | \
ZYNQ_QSPI_IXR_TXFULL_MASK | \
ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
/*
* QSPI Enable Register bit Masks
*
* This register is used to enable or disable the QSPI controller
*/
#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
/*
* QSPI Linear Configuration Register
*
* It is named Linear Configuration, but it also controls the controller's
* behaviour in modes other than linear mode.
*/
#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* read instruction code */
#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in words */
#define ZYNQ_QSPI_RX_THRESHOLD 32 /* Rx FIFO threshold level */
#define ZYNQ_QSPI_TX_THRESHOLD 1 /* Tx FIFO threshold level */
/*
* The modebits configurable by the driver to make the SPI support different
* data formats
*/
#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
/* Default number of chip selects */
#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
/**
* struct zynq_qspi - Defines qspi driver instance
* @dev: Pointer to the parent device
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
* @irq: IRQ number
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes left to receive
* @data_completion: completion structure
*/
struct zynq_qspi {
struct device *dev;
void __iomem *regs;
struct clk *refclk;
struct clk *pclk;
int irq;
u8 *txbuf;
u8 *rxbuf;
int tx_bytes;
int rx_bytes;
struct completion data_completion;
};
/*
* Inline functions for the QSPI controller read/write
*/
static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
{
return readl_relaxed(xqspi->regs + offset);
}
static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
u32 val)
{
writel_relaxed(val, xqspi->regs + offset);
}
/**
* zynq_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynq_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
* - Master mode
* - Baud rate divisor is set to 2
* - Tx threshold set to 1; Rx threshold set to 32
* - Flash memory interface mode enabled
* - Size of the word to be transferred as 8 bit
* This function performs the following actions
* - Disable and clear all the interrupts
* - Enable manual slave select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the size of the word to be transferred as 32 bit
* - Set the little endian mode of TX FIFO and
* - Enable the QSPI controller
*/
static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
{
u32 config_reg;
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
/* Disable linear mode as the boot loader may have used it */
zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
/* Clear the RX FIFO */
while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
ZYNQ_QSPI_CONFIG_CPOL_MASK |
ZYNQ_QSPI_CONFIG_CPHA_MASK |
ZYNQ_QSPI_CONFIG_BDRATE_MASK |
ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
ZYNQ_QSPI_CONFIG_MANSRT_MASK);
config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
ZYNQ_QSPI_CONFIG_IFMODE_MASK);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
ZYNQ_QSPI_RX_THRESHOLD);
zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
ZYNQ_QSPI_TX_THRESHOLD);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
static bool zynq_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
/* The number of address bytes must not exceed 3 */
if (op->addr.nbytes > 3)
return false;
return true;
}
/**
* zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be read (1..4)
*/
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
u32 data;
data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
if (xqspi->rxbuf) {
memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
xqspi->rxbuf += size;
}
xqspi->rx_bytes -= size;
if (xqspi->rx_bytes < 0)
xqspi->rx_bytes = 0;
}
/**
* zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be written (1..4)
*/
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
static const unsigned int offset[4] = {
ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
u32 data;
if (xqspi->txbuf) {
data = 0xffffffff;
memcpy(&data, xqspi->txbuf, size);
xqspi->txbuf += size;
} else {
data = 0;
}
xqspi->tx_bytes -= size;
zynq_qspi_write(xqspi, offset[size - 1], data);
}
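/*
 * Worked example: for size == 3 the offset[] table above selects
 * ZYNQ_QSPI_TXD_00_11_OFFSET, and because data is preset to 0xffffffff
 * before the memcpy(), the three payload bytes occupy the low byte lanes
 * while the unused top byte stays 0xff.  A full 4-byte word is written
 * through ZYNQ_QSPI_TXD_00_00_OFFSET instead.
 */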
/**
* zynq_qspi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
* @assert: true to assert (select) the chip select line, false to deassert it
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
struct spi_controller *ctrl = spi->master;
struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
u32 config_reg;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
if (assert) {
/* Select the slave */
config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
config_reg |= (((~(BIT(spi->chip_select))) <<
ZYNQ_QSPI_SS_SHIFT) &
ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
} else {
config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
}
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
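/*
 * Worked example: with spi->chip_select == 0, ~BIT(0) is 0xfffffffe;
 * shifted left by ZYNQ_QSPI_SS_SHIFT (10) and masked with
 * ZYNQ_QSPI_CONFIG_SSCTRL_MASK (BIT(10)) this clears bit 10, asserting the
 * active-low slave select.  Deasserting simply sets bit 10 again.
 */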
/**
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
* @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
*
* Return: 0 on success and -EINVAL on invalid input parameter
*
* Note: If the requested frequency is not an exact match for what the
* prescaler can produce, the driver sets the highest clock frequency that
* does not exceed the requested frequency. If the requested frequency is
* outside the range supported by the QSPI controller, the driver clamps it
* to the highest or lowest frequency the controller supports.
*/
static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
{
u32 config_reg, baud_rate_val = 0;
/*
* Set the clock frequency
* The baud rate divisor is not a direct mapping to the value written
* into the configuration register (config_reg[5:3])
* i.e. 000 - divide by 2
* 001 - divide by 4
* ----------------
* 111 - divide by 256
*/
while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
spi->max_speed_hz)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
/* Set the QSPI clock phase and clock polarity */
config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
(~ZYNQ_QSPI_CONFIG_CPOL_MASK);
if (spi->mode & SPI_CPHA)
config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
if (spi->mode & SPI_CPOL)
config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
return 0;
}
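/*
 * Worked example, assuming a 200 MHz ref_clk purely for illustration: for
 * spi->max_speed_hz == 50 MHz the loop above tries baud_rate_val = 0
 * (200 MHz / 2 = 100 MHz, too fast) and stops at baud_rate_val = 1
 * (200 MHz / 4 = 50 MHz), so CR[5:3] is written as 001 and the bus clock
 * ends up at 50 MHz.
 */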
/**
* zynq_qspi_setup_op - Configure the QSPI controller
* @spi: Pointer to the spi_device structure
*
* Enables the controller's clocks and the QSPI controller itself so that
* subsequent operations for this device can be issued.
*
* Return: 0 on success and error value on failure
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->master;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
if (ctrl->busy)
return -EBUSY;
clk_enable(qspi->refclk);
clk_enable(qspi->pclk);
zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
return 0;
}
/**
* zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @txcount: Maximum number of words to write
* @txempty: Indicates that TxFIFO is empty
*/
static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
bool txempty)
{
int count, len, k;
len = xqspi->tx_bytes;
if (len && len < 4) {
/*
* We must empty the TxFIFO between accesses to TXD0,
* TXD1, TXD2, TXD3.
*/
if (txempty)
zynq_qspi_txfifo_op(xqspi, len);
return;
}
count = len / 4;
if (count > txcount)
count = txcount;
if (xqspi->txbuf) {
iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
xqspi->txbuf, count);
xqspi->txbuf += count * 4;
} else {
for (k = 0; k < count; k++)
writel_relaxed(0, xqspi->regs +
ZYNQ_QSPI_TXD_00_00_OFFSET);
}
xqspi->tx_bytes -= count * 4;
}
/**
* zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @rxcount: Maximum number of words to read
*/
static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
{
int count, len, k;
len = xqspi->rx_bytes - xqspi->tx_bytes;
count = len / 4;
if (count > rxcount)
count = rxcount;
if (xqspi->rxbuf) {
ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
xqspi->rxbuf, count);
xqspi->rxbuf += count * 4;
} else {
for (k = 0; k < count; k++)
readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
}
xqspi->rx_bytes -= count * 4;
len -= count * 4;
if (len && len < 4 && count < rxcount)
zynq_qspi_rxfifo_op(xqspi, len);
}
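/*
 * Worked example: a 10-byte TX buffer first goes out as two full 32-bit
 * words via iowrite32_rep() in zynq_qspi_write_op(), leaving tx_bytes == 2;
 * a later pass takes the len < 4 branch and hands the remaining two bytes
 * to zynq_qspi_txfifo_op().  The RX side drains in the same
 * word-plus-remainder pattern through zynq_qspi_rxfifo_op().
 */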
/**
* zynq_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
* This function handles TX empty only.
* On TX empty interrupt this function reads the received data from RX FIFO and
* fills the TX FIFO if there is any data remaining to be transferred.
*
* Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
*/
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
u32 intr_status;
bool txempty;
struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
(intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
/*
* This bit is set when Tx FIFO has < THRESHOLD entries.
* We have the THRESHOLD value set to 1,
* so this bit indicates Tx FIFO is empty.
*/
txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
/* Read out the data from the RX FIFO */
zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
if (xqspi->tx_bytes) {
/* There is more data to send */
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
txempty);
} else {
/*
* Signal completion only once both transmit and receive
* have finished.
*/
if (!xqspi->rx_bytes) {
zynq_qspi_write(xqspi,
ZYNQ_QSPI_IDIS_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
complete(&xqspi->data_completion);
}
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/**
* zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
* @mem: the SPI memory
* @op: the memory operation to execute
*
* Executes a memory operation.
*
* This function selects the chip, runs the command, address, dummy and data
* phases in turn, and then deselects the chip.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = (u8 *)&op->cmd.opcode;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = sizeof(op->cmd.opcode);
xqspi->rx_bytes = sizeof(op->cmd.opcode);
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
if (op->addr.nbytes) {
for (i = 0; i < op->addr.nbytes; i++) {
xqspi->txbuf[i] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
}
reinit_completion(&xqspi->data_completion);
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->addr.nbytes;
xqspi->rx_bytes = op->addr.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
if (op->dummy.nbytes) {
tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
memset(tmpbuf, 0xff, op->dummy.nbytes);
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = tmpbuf;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->dummy.nbytes;
xqspi->rx_bytes = op->dummy.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
kfree(tmpbuf);
}
if (op->data.nbytes) {
reinit_completion(&xqspi->data_completion);
if (op->data.dir == SPI_MEM_DATA_OUT) {
xqspi->txbuf = (u8 *)op->data.buf.out;
xqspi->tx_bytes = op->data.nbytes;
xqspi->rxbuf = NULL;
xqspi->rx_bytes = op->data.nbytes;
} else {
xqspi->txbuf = NULL;
xqspi->rxbuf = (u8 *)op->data.buf.in;
xqspi->rx_bytes = op->data.nbytes;
xqspi->tx_bytes = op->data.nbytes;
}
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
zynq_qspi_chipselect(mem->spi, false);
return err;
}
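/*
 * Illustrative sketch only (not used by the driver): the kind of operation
 * a spi-mem client such as spi-nor would hand down to
 * zynq_qspi_exec_mem_op() above -- a 1-1-4 fast read with a 3-byte address
 * and one dummy byte.  The layout is a generic example, not tied to any
 * particular flash device.
 */
static int __maybe_unused zynq_qspi_example_quad_read(struct spi_mem *mem,
						      u32 addr, u8 *buf,
						      unsigned int len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(ZYNQ_QSPI_FAST_READ_QOUT_CODE, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	/* The core checks supports_op() and then routes to exec_op() above */
	return spi_mem_exec_op(mem, &op);
}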
static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
.supports_op = zynq_qspi_supports_op,
.exec_op = zynq_qspi_exec_mem_op,
};
/**
* zynq_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
*
* Return: 0 on success and error value on failure
*/
static int zynq_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct zynq_qspi *xqspi;
struct resource *res;
u32 num_cs;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
xqspi = spi_controller_get_devdata(ctlr);
xqspi->dev = dev;
platform_set_drvdata(pdev, xqspi);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
goto remove_master;
}
init_completion(&xqspi->data_completion);
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
goto remove_master;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable APB clock.\n");
goto remove_master;
}
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto clk_dis_pclk;
}
/* QSPI controller initializations */
zynq_qspi_init_hw(xqspi);
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "irq resource not found\n");
goto remove_master;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto remove_master;
}
ret = of_property_read_u32(np, "num-cs",
&num_cs);
if (ret < 0)
ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
else
ctlr->num_chipselect = num_cs;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->mem_ops = &zynq_qspi_mem_ops;
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
ret = spi_register_controller(ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
goto clk_dis_all;
}
return ret;
clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
remove_master:
spi_controller_put(ctlr);
return ret;
}
/**
* zynq_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees all resources allocated to
* the device.
*
* Return: 0 on success and error value on failure
*/
static int zynq_qspi_remove(struct platform_device *pdev)
{
struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
return 0;
}
static const struct of_device_id zynq_qspi_of_match[] = {
{ .compatible = "xlnx,zynq-qspi-1.0", },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
/*
* zynq_qspi_driver - This structure defines the QSPI platform driver
*/
static struct platform_driver zynq_qspi_driver = {
.probe = zynq_qspi_probe,
.remove = zynq_qspi_remove,
.driver = {
.name = "zynq-qspi",
.of_match_table = zynq_qspi_of_match,
},
};
module_platform_driver(zynq_qspi_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
MODULE_LICENSE("GPL");
......@@ -36,6 +36,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
......@@ -1179,10 +1181,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
spi_res_release(ctlr, msg);
spi_finalize_current_message(ctlr);
spi_res_release(ctlr, msg);
return ret;
}
......@@ -2265,7 +2267,7 @@ int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
int status;
int id, first_dynamic;
if (!dev)
......@@ -2279,24 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
if (!spi_controller_is_slave(ctlr)) {
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
return status;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
*/
ctlr->mode_bits |= SPI_CS_HIGH;
} else {
/* Legacy code path for GPIOs from DT */
status = of_spi_register_master(ctlr);
if (status)
return status;
}
}
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
......@@ -2353,6 +2337,25 @@ int spi_register_controller(struct spi_controller *ctlr)
* registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
if (!spi_controller_is_slave(ctlr)) {
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
return status;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
*/
ctlr->mode_bits |= SPI_CS_HIGH;
} else {
/* Legacy code path for GPIOs from DT */
status = of_spi_register_master(ctlr);
if (status)
return status;
}
}
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
......@@ -2785,11 +2788,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
size_t offset;
size_t count, i;
/* warn once about this fact that we are splitting a transfer */
dev_warn_once(&msg->spi->dev,
"spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
xfer->len, maxsize);
/* calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
......@@ -2947,6 +2945,11 @@ int spi_setup(struct spi_device *spi)
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
/* Nothing prevents us from working with an active-high CS when it is
* driven by a GPIO.
*/
if (gpio_is_valid(spi->cs_gpio))
bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
......@@ -2992,6 +2995,21 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
* @setup: CS setup time in terms of clock count
* @hold: CS hold time in terms of clock count
* @inactive_dly: CS inactive delay between transfers in terms of clock count
*/
void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
u8 inactive_dly)
{
if (spi->controller->set_cs_timing)
spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
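/*
 * Usage sketch (not called anywhere in the core; the clock-count values are
 * placeholders): a peripheral driver needing extra chip-select margin would
 * call spi_set_cs_timing() after its spi_setup().
 */
static void __maybe_unused spi_example_cs_timing(struct spi_device *spi)
{
	if (spi_setup(spi))
		return;

	/* 2 clocks of CS setup, 4 of hold, 8 idle clocks between transfers */
	spi_set_cs_timing(spi, 2, 4, 8);
}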
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
......@@ -3066,8 +3084,6 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
if (!xfer->speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
......
......@@ -276,17 +276,19 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
k_tmp->word_delay_usecs = u_tmp->word_delay_usecs;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %u %s%s%s%dbits %u usec %uHz\n",
" xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
u_tmp->word_delay_usecs,
u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
......
......@@ -106,8 +106,6 @@ source "drivers/staging/mt7621-pci-phy/Kconfig"
source "drivers/staging/mt7621-pinctrl/Kconfig"
source "drivers/staging/mt7621-spi/Kconfig"
source "drivers/staging/mt7621-dma/Kconfig"
source "drivers/staging/ralink-gdma/Kconfig"
......
......@@ -43,7 +43,6 @@ obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/
obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/
obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
......
config SPI_MT7621
tristate "MediaTek MT7621 SPI Controller"
depends on RALINK
help
This selects a driver for the MediaTek MT7621 SPI Controller.
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
- general code review and clean up
- ensure device-tree requirements are documented
Cc: NeilBrown <neil@brown.name>
......@@ -6,13 +6,9 @@ struct spi_device;
/**
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
* @chipselect: array of gpio numbers to use as chip selects
* @num_chipselect: ARRAY_SIZE(chipselect)
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
int *chipselect;
int num_chipselect;
bool use_dma;
};
......
......@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
u8 dma_burst_size;
bool is_slave;
/* DMA engine specific config */
......
......@@ -295,6 +295,10 @@ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
......@@ -310,6 +314,14 @@ spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
struct sg_table *sg)
{
}
static inline
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
return false;
}
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
......
......@@ -143,7 +143,7 @@ struct spi_device {
u32 max_speed_hz;
u8 chip_select;
u8 bits_per_word;
u16 mode;
u32 mode;
#define SPI_CPHA 0x01 /* clock phase */
#define SPI_CPOL 0x02 /* clock polarity */
#define SPI_MODE_0 (0|0) /* (original MicroWire) */
......@@ -330,6 +330,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
* @set_cs_timing: optional hook that lets an SPI peripheral driver request the
* SPI master controller to configure specific CS setup time, hold time and
* inactive delay, in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
......@@ -363,6 +366,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
*
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
* @prepare_message: set up the controller to transfer a single message,
......@@ -439,13 +443,12 @@ struct spi_controller {
u16 dma_alignment;
/* spi_device.mode flags understood by this controller driver */
u16 mode_bits;
u32 mode_bits;
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
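/*
 * Worked example: SPI_BPW_RANGE_MASK(4, 32) expands to GENMASK(31, 3),
 * i.e. 0xfffffff8.  Bit (n - 1) set in bits_per_word_mask advertises
 * support for n-bit words, so this covers every word size from 4 to 32
 * bits, matching the SPI_BPW_RANGE_MASK(4, 32) use in the Tegra hunk
 * earlier in this series.
 */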
/* limits on transfer speed */
u32 min_speed_hz;
......@@ -489,6 +492,17 @@ struct spi_controller {
*/
int (*setup)(struct spi_device *spi);
/*
* set_cs_timing() method is for SPI controllers that support
* configuring CS timing.
*
* This hook allows SPI client drivers to request SPI controllers
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles,
u8 hold_clk_cycles, u8 inactive_clk_cycles);
/* bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
......@@ -1277,7 +1291,7 @@ struct spi_board_info {
/* mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u16 mode;
u32 mode;
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
......
......@@ -44,6 +44,7 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi,
/* start or stop queue processing */
extern int spi_bitbang_start(struct spi_bitbang *spi);
extern int spi_bitbang_init(struct spi_bitbang *spi);
extern void spi_bitbang_stop(struct spi_bitbang *spi);
#endif /* __SPI_BITBANG_H */
......@@ -131,9 +131,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
__field( struct spi_transfer *, xfer )
__field( int, len )
__dynamic_array(u8, rx_buf,
spi_valid_rxbuf(msg, xfer) ? xfer->len : 0)
spi_valid_rxbuf(msg, xfer) ?
(xfer->len < 64 ? xfer->len : 64) : 0)
__dynamic_array(u8, tx_buf,
spi_valid_txbuf(msg, xfer) ? xfer->len : 0)
spi_valid_txbuf(msg, xfer) ?
(xfer->len < 64 ? xfer->len : 64) : 0)
),
TP_fast_assign(
......@@ -144,11 +146,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
if (spi_valid_txbuf(msg, xfer))
memcpy(__get_dynamic_array(tx_buf),
xfer->tx_buf, xfer->len);
xfer->tx_buf, __get_dynamic_array_len(tx_buf));
if (spi_valid_rxbuf(msg, xfer))
memcpy(__get_dynamic_array(rx_buf),
xfer->rx_buf, xfer->len);
xfer->rx_buf, __get_dynamic_array_len(rx_buf));
),
TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]",
......
......@@ -66,6 +66,9 @@
* @delay_usecs: If nonzero, how long to delay after the last bit transfer
* before optionally deselecting the device before the next transfer.
* @cs_change: True to deselect device before starting the next transfer.
* @word_delay_usecs: If nonzero, how long to wait between words within one
* transfer. This property needs explicit support in the SPI controller,
* otherwise it is silently ignored.
*
* This structure is mapped directly to the kernel spi_transfer structure;
* the fields have the same meanings, except of course that the pointers
......@@ -100,7 +103,8 @@ struct spi_ioc_transfer {
__u8 cs_change;
__u8 tx_nbits;
__u8 rx_nbits;
__u16 pad;
__u8 word_delay_usecs;
__u8 pad;
/* If the contents of 'struct spi_ioc_transfer' ever change
* incompatibly, then the ioctl number (currently 0) must change;
......
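For context, a hedged userspace sketch of how the new word_delay_usecs field is used through the spidev ioctl interface; the speed and delay values are arbitrary illustrations, and as noted above the delay is silently ignored unless the controller driver supports per-word delays.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int spidev_xfer_with_word_delay(int fd, const uint8_t *tx, uint8_t *rx,
                                uint32_t len)
{
	struct spi_ioc_transfer xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = len;
	xfer.speed_hz = 1000000;	/* 1 MHz, arbitrary for this sketch */
	xfer.word_delay_usecs = 10;	/* pause 10 us between words */

	return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
}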
......@@ -10,6 +10,8 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <sound/soc.h>
......@@ -54,9 +56,18 @@ static const struct spi_device_id adau1977_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, adau1977_spi_ids);
static const struct of_device_id adau1977_spi_of_match[] = {
{ .compatible = "adi,adau1977" },
{ .compatible = "adi,adau1978" },
{ .compatible = "adi,adau1979" },
{ },
};
MODULE_DEVICE_TABLE(of, adau1977_spi_of_match);
static struct spi_driver adau1977_spi_driver = {
.driver = {
.name = "adau1977",
.of_match_table = of_match_ptr(adau1977_spi_of_match),
},
.probe = adau1977_spi_probe,
.id_table = adau1977_spi_ids,
......