Commit 70ada779 authored by Linus Torvalds

Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6

* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (53 commits)
  spi/omap2_mcspi: Verify TX reg is empty after TX only xfer with DMA
  spi/omap2_mcspi: disable channel after TX_ONLY transfer in PIO mode
  spi/bfin_spi: namespace local structs
  spi/bfin_spi: init early
  spi/bfin_spi: check per-transfer bits_per_word
  spi/bfin_spi: warn when CS is driven by hardware (CPHA=0)
  spi/bfin_spi: cs should be always low when a new transfer begins
  spi/bfin_spi: fix typo in comment
  spi/bfin_spi: reject unsupported SPI modes
  spi/bfin_spi: use dma_disable_irq_nosync() in irq handler
  spi/bfin_spi: combine duplicate SPI_CTL read/write logic
  spi/bfin_spi: reset ctl_reg bits when setup is run again on a device
  spi/bfin_spi: push all size checks into the transfer function
  spi/bfin_spi: use nosync when disabling the IRQ from the IRQ handler
  spi/bfin_spi: sync hardware state before reprogramming everything
  spi/bfin_spi: save/restore state when suspending/resuming
  spi/bfin_spi: redo GPIO CS handling
  Blackfin: SPI: expand SPI bitmasks
  spi/bfin_spi: use the SPI namespaced bit names
  spi/bfin_spi: drop extra memory we don't need
  ...
parents b22793f7 2764c500
* SPI (Serial Peripheral Interface)
Required properties:
- cell-index : SPI controller index.
- cell-index : QE SPI subblock index.
0: QE subblock SPI1
1: QE subblock SPI2
- compatible : should be "fsl,spi".
- mode : the SPI operation mode; it can be "cpu" or "cpu-qe".
- reg : Offset and length of the register set for the device
......@@ -29,3 +31,23 @@ Example:
gpios = <&gpio 18 1 // device reg=<0>
&gpio 19 1>; // device reg=<1>
};
* eSPI (Enhanced Serial Peripheral Interface)
Required properties:
- compatible : should be "fsl,mpc8536-espi".
- reg : Offset and length of the register set for the device.
- interrupts : should contain the eSPI interrupt (the device has one interrupt).
- fsl,espi-num-chipselects : the number of chipselect signals.
Example:
spi@110000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,mpc8536-espi";
reg = <0x110000 0x1000>;
interrupts = <53 0x2>;
interrupt-parent = <&mpic>;
fsl,espi-num-chipselects = <4>;
};
......@@ -172,18 +172,12 @@ static void phy3250_spi_cs_set(u32 control)
}
static struct pl022_config_chip spi0_chip_info = {
.lbm = LOOPBACK_DISABLED,
.com_mode = INTERRUPT_TRANSFER,
.iface = SSP_INTERFACE_MOTOROLA_SPI,
.hierarchy = SSP_MASTER,
.slave_tx_disable = 0,
.endian_tx = SSP_TX_LSB,
.endian_rx = SSP_RX_LSB,
.data_size = SSP_DATA_BITS_8,
.rx_lev_trig = SSP_RX_4_OR_MORE_ELEM,
.tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC,
.clk_phase = SSP_CLK_FIRST_EDGE,
.clk_pol = SSP_CLK_POL_IDLE_LOW,
.ctrl_len = SSP_BITS_8,
.wait_state = SSP_MWIRE_WAIT_ZERO,
.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
......@@ -239,6 +233,7 @@ static int __init phy3250_spi_board_register(void)
.max_speed_hz = 5000000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
.platform_data = &eeprom,
.controller_data = &spi0_chip_info,
},
......
......@@ -46,7 +46,6 @@ static ssize_t dummy_looptest(struct device *dev,
* struct, this is just used here to alter the behaviour of the chip
* in order to perform tests.
*/
struct pl022_config_chip *chip_info = spi->controller_data;
int status;
u8 txbuf[14] = {0xDE, 0xAD, 0xBE, 0xEF, 0x2B, 0xAD,
0xCA, 0xFE, 0xBA, 0xBE, 0xB1, 0x05,
......@@ -72,7 +71,7 @@ static ssize_t dummy_looptest(struct device *dev,
* Force chip to 8 bit mode
* WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
*/
chip_info->data_size = SSP_DATA_BITS_8;
spi->bits_per_word = 8;
/* You should NOT DO THIS EITHER */
spi->master->setup(spi);
......@@ -159,7 +158,7 @@ static ssize_t dummy_looptest(struct device *dev,
* Force chip to 16 bit mode
* WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
*/
chip_info->data_size = SSP_DATA_BITS_16;
spi->bits_per_word = 16;
/* You should NOT DO THIS EITHER */
spi->master->setup(spi);
......
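The two hunks above poke spi->bits_per_word and re-run setup() by hand, which the in-code warnings rightly call out as something real drivers must never do. For controllers that honour it, the per-transfer override is the cleaner route; a minimal sketch, reusing the txbuf/rxbuf/status/spi variables of the test function above (assumed, not all shown here):

	struct spi_transfer t = {
		.tx_buf        = txbuf,
		.rx_buf        = rxbuf,
		.len           = sizeof(txbuf),
		.bits_per_word = 16,	/* word size for this transfer only */
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);	/* blocking helper from the SPI core */

Note that a controller driver may still reject the per-transfer value; the atmel_spi hunk later in this merge returns -ENOPROTOOPT in exactly that case.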
......@@ -30,8 +30,6 @@ static void select_dummy_chip(u32 chipselect)
}
struct pl022_config_chip dummy_chip_info = {
/* Nominally this is LOOPBACK_DISABLED, but this is our dummy chip! */
.lbm = LOOPBACK_ENABLED,
/*
* available POLLING_TRANSFER and INTERRUPT_TRANSFER,
* DMA_TRANSFER does not work
......@@ -42,14 +40,8 @@ struct pl022_config_chip dummy_chip_info = {
.hierarchy = SSP_MASTER,
/* 0 = drive TX even as slave, 1 = do not drive TX as slave */
.slave_tx_disable = 0,
/* LSB first */
.endian_tx = SSP_TX_LSB,
.endian_rx = SSP_RX_LSB,
.data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */
.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
.clk_phase = SSP_CLK_SECOND_EDGE,
.clk_pol = SSP_CLK_POL_IDLE_LOW,
.ctrl_len = SSP_BITS_12,
.wait_state = SSP_MWIRE_WAIT_ZERO,
.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
......@@ -75,7 +67,7 @@ static struct spi_board_info u300_spi_devices[] = {
.bus_num = 0, /* Only one bus on this chip */
.chip_select = 0,
/* Means SPI_CS_HIGH, change if e.g low CS */
.mode = 0,
.mode = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
},
#endif
};
......
......@@ -55,19 +55,13 @@ static void ab4500_spi_cs_control(u32 command)
}
struct pl022_config_chip ab4500_chip_info = {
.lbm = LOOPBACK_DISABLED,
.com_mode = INTERRUPT_TRANSFER,
.iface = SSP_INTERFACE_MOTOROLA_SPI,
/* we can act as master only */
.hierarchy = SSP_MASTER,
.slave_tx_disable = 0,
.endian_rx = SSP_RX_MSB,
.endian_tx = SSP_TX_MSB,
.data_size = SSP_DATA_BITS_24,
.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
.clk_phase = SSP_CLK_SECOND_EDGE,
.clk_pol = SSP_CLK_POL_IDLE_HIGH,
.cs_control = ab4500_spi_cs_control,
};
......@@ -83,7 +77,7 @@ static struct spi_board_info u8500_spi_devices[] = {
.max_speed_hz = 12000000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
.mode = SPI_MODE_3,
.irq = IRQ_DB8500_AB8500,
},
};
......
......@@ -32,6 +32,8 @@ struct s3c64xx_spi_csinfo {
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
* @src_clk_name: Platform name of the corresponding clock.
* @clk_from_cmu: True if the SPI clock/prescaler control block is provided
* by the platform's clock-management unit rather than by the SPI controller.
* @num_cs: Number of CS this controller emulates.
* @cfg_gpio: Configure pins for this SPI controller.
* @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
......@@ -41,6 +43,7 @@ struct s3c64xx_spi_csinfo {
struct s3c64xx_spi_info {
int src_clk_nr;
char *src_clk_name;
bool clk_from_cmu;
int num_cs;
......
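The new clk_from_cmu flag is plain platform data, so a machine file sets it next to the existing fields. A minimal sketch of a hypothetical board definition (only the struct and field names come from the header above; the values and the "spi-bus" clock name are illustrative):

static struct s3c64xx_spi_info smdk_spi0_info = {
	.src_clk_nr   = 0,
	.src_clk_name = "spi-bus",	/* illustrative clock name */
	.clk_from_cmu = true,		/* divider handled by the clock-management unit */
	.num_cs       = 1,
	/* .cfg_gpio and .fifo_lvl_mask filled in as the SoC requires */
};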
......@@ -11,26 +11,17 @@
#define MIN_SPI_BAUD_VAL 2
#define SPI_READ 0
#define SPI_WRITE 1
#define SPI_CTRL_OFF 0x0
#define SPI_FLAG_OFF 0x4
#define SPI_STAT_OFF 0x8
#define SPI_TXBUFF_OFF 0xc
#define SPI_RXBUFF_OFF 0x10
#define SPI_BAUD_OFF 0x14
#define SPI_SHAW_OFF 0x18
#define BIT_CTL_ENABLE 0x4000
#define BIT_CTL_OPENDRAIN 0x2000
#define BIT_CTL_MASTER 0x1000
#define BIT_CTL_POLAR 0x0800
#define BIT_CTL_PHASE 0x0400
#define BIT_CTL_BITORDER 0x0200
#define BIT_CTL_CPOL 0x0800
#define BIT_CTL_CPHA 0x0400
#define BIT_CTL_LSBF 0x0200
#define BIT_CTL_WORDSIZE 0x0100
#define BIT_CTL_MISOENABLE 0x0020
#define BIT_CTL_EMISO 0x0020
#define BIT_CTL_PSSE 0x0010
#define BIT_CTL_GM 0x0008
#define BIT_CTL_SZ 0x0004
#define BIT_CTL_RXMOD 0x0000
#define BIT_CTL_TXMOD 0x0001
#define BIT_CTL_TIMOD_DMA_TX 0x0003
......@@ -50,61 +41,7 @@
#define BIT_STU_SENDOVER 0x0001
#define BIT_STU_RECVFULL 0x0020
#define CFG_SPI_ENABLE 1
#define CFG_SPI_DISABLE 0
#define CFG_SPI_OUTENABLE 1
#define CFG_SPI_OUTDISABLE 0
#define CFG_SPI_ACTLOW 1
#define CFG_SPI_ACTHIGH 0
#define CFG_SPI_PHASESTART 1
#define CFG_SPI_PHASEMID 0
#define CFG_SPI_MASTER 1
#define CFG_SPI_SLAVE 0
#define CFG_SPI_SENELAST 0
#define CFG_SPI_SENDZERO 1
#define CFG_SPI_RCVFLUSH 1
#define CFG_SPI_RCVDISCARD 0
#define CFG_SPI_LSBFIRST 1
#define CFG_SPI_MSBFIRST 0
#define CFG_SPI_WORDSIZE16 1
#define CFG_SPI_WORDSIZE8 0
#define CFG_SPI_MISOENABLE 1
#define CFG_SPI_MISODISABLE 0
#define CFG_SPI_READ 0x00
#define CFG_SPI_WRITE 0x01
#define CFG_SPI_DMAREAD 0x02
#define CFG_SPI_DMAWRITE 0x03
#define CFG_SPI_CSCLEARALL 0
#define CFG_SPI_CHIPSEL1 1
#define CFG_SPI_CHIPSEL2 2
#define CFG_SPI_CHIPSEL3 3
#define CFG_SPI_CHIPSEL4 4
#define CFG_SPI_CHIPSEL5 5
#define CFG_SPI_CHIPSEL6 6
#define CFG_SPI_CHIPSEL7 7
#define CFG_SPI_CS1VALUE 1
#define CFG_SPI_CS2VALUE 2
#define CFG_SPI_CS3VALUE 3
#define CFG_SPI_CS4VALUE 4
#define CFG_SPI_CS5VALUE 5
#define CFG_SPI_CS6VALUE 6
#define CFG_SPI_CS7VALUE 7
#define CMD_SPI_SET_BAUDRATE 2
#define CMD_SPI_GET_SYSTEMCLOCK 25
#define CMD_SPI_SET_WRITECONTINUOUS 26
#define MAX_CTRL_CS 8 /* cs in spi controller */
/* device.platform_data for SSP controller devices */
struct bfin5xx_spi_master {
......@@ -120,9 +57,7 @@ struct bfin5xx_spi_chip {
u16 ctl_reg;
u8 enable_dma;
u8 bits_per_word;
u8 cs_change_per_word;
u16 cs_chg_udelay; /* Some devices require 16-bit delays */
u32 cs_gpio;
/* Value to send if no TX value is supplied, usually 0x0 or 0xFFFF */
u16 idle_tx_val;
u8 pio_interrupt; /* Enable spi data irq */
......
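After the trim above, a board describes a Blackfin SPI slave with just the remaining knobs in struct bfin5xx_spi_chip. A hedged sketch (device name and values are illustrative, not taken from any board in this merge):

static struct bfin5xx_spi_chip flash_chip_info = {
	.enable_dma    = 0,		/* PIO transfers for this device */
	.bits_per_word = 8,
	.cs_chg_udelay = 0,
	.idle_tx_val   = 0xffff,	/* clocked out when no TX buffer is supplied */
	.pio_interrupt = 0,		/* poll instead of using the data IRQ */
};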
......@@ -108,6 +108,58 @@ rtc@68 {
};
};
spi@7000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,mpc8536-espi";
reg = <0x7000 0x1000>;
interrupts = <59 0x2>;
interrupt-parent = <&mpic>;
fsl,espi-num-chipselects = <4>;
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "spansion,s25sl12801";
reg = <0>;
spi-max-frequency = <40000000>;
partition@u-boot {
label = "u-boot";
reg = <0x00000000 0x00100000>;
read-only;
};
partition@kernel {
label = "kernel";
reg = <0x00100000 0x00500000>;
read-only;
};
partition@dtb {
label = "dtb";
reg = <0x00600000 0x00100000>;
read-only;
};
partition@fs {
label = "file system";
reg = <0x00700000 0x00900000>;
};
};
flash@1 {
compatible = "spansion,s25sl12801";
reg = <1>;
spi-max-frequency = <40000000>;
};
flash@2 {
compatible = "spansion,s25sl12801";
reg = <2>;
spi-max-frequency = <40000000>;
};
flash@3 {
compatible = "spansion,s25sl12801";
reg = <3>;
spi-max-frequency = <40000000>;
};
};
dma@21300 {
#address-cells = <1>;
#size-cells = <1>;
......
......@@ -236,22 +236,19 @@ dma-channel@180 {
};
spi@110000 {
cell-index = <0>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,espi";
compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
reg = <0x110000 0x1000>;
interrupts = <53 0x2>;
interrupt-parent = <&mpic>;
espi,num-ss-bits = <4>;
mode = "cpu";
fsl,espi-num-chipselects = <4>;
fsl_m25p80@0 {
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,espi-flash";
compatible = "spansion,s25sl12801";
reg = <0>;
linux,modalias = "fsl_m25p80";
spi-max-frequency = <40000000>; /* input clock */
partition@u-boot {
label = "u-boot";
......
......@@ -83,6 +83,11 @@ static int __devinit ab8500_spi_probe(struct spi_device *spi)
struct ab8500 *ab8500;
int ret;
spi->bits_per_word = 24;
ret = spi_setup(spi);
if (ret < 0)
return ret;
ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
if (!ab8500)
return -ENOMEM;
......
......@@ -182,12 +182,27 @@ config SPI_MPC512x_PSC
This enables using the Freescale MPC5121 Programmable Serial
Controller in SPI master mode.
config SPI_MPC8xxx
tristate "Freescale MPC8xxx SPI controller"
config SPI_FSL_LIB
tristate
depends on FSL_SOC
config SPI_FSL_SPI
tristate "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
This enables using the Freescale MPC8xxx SPI controllers in master
mode.
This enables using the Freescale SPI controllers in master mode.
MPC83xx platforms use the controller in cpu mode or CPM/QE mode;
MPC8569 uses it in QE mode, and MPC8610 in cpu mode.
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
This enables using the Freescale eSPI controllers in master mode.
The controller is found on MPC8536 and later 85xx platforms, as well as
on the P10xx, P20xx, P30xx, P40xx and P50xx families.
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
......@@ -298,6 +313,13 @@ config SPI_STMP3XXX
help
SPI driver for Freescale STMP37xx/378x SoC SSP interface
config SPI_TOPCLIFF_PCH
tristate "Topcliff PCH SPI Controller"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
......
......@@ -2,9 +2,7 @@
# Makefile for kernel SPI drivers.
#
ifeq ($(CONFIG_SPI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
# small core, mostly translating board-specific
# config declarations into driver model code
......@@ -34,11 +32,14 @@ obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o
obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
......
......@@ -27,7 +27,6 @@
/*
* TODO:
* - add timeout on polled transfers
* - add generic DMA framework support
*/
#include <linux/init.h>
......@@ -45,6 +44,9 @@
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
/*
* This macro is used to define some register default values.
......@@ -381,6 +383,14 @@ struct pl022 {
enum ssp_reading read;
enum ssp_writing write;
u32 exp_fifo_level;
/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
struct sg_table sgt_rx;
struct sg_table sgt_tx;
char *dummypage;
#endif
};
/**
......@@ -406,7 +416,7 @@ struct chip_data {
u16 dmacr;
u16 cpsr;
u8 n_bytes;
u8 enable_dma:1;
bool enable_dma;
enum ssp_reading read;
enum ssp_writing write;
void (*cs_control) (u32 command);
......@@ -763,6 +773,371 @@ static void *next_transfer(struct pl022 *pl022)
}
return STATE_DONE;
}
/*
* This DMA functionality is only compiled in if we have
* access to the generic DMA devices/DMA engine.
*/
#ifdef CONFIG_DMA_ENGINE
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
/* Unmap and free the SG tables */
dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
pl022->sgt_rx.nents, DMA_FROM_DEVICE);
sg_free_table(&pl022->sgt_rx);
sg_free_table(&pl022->sgt_tx);
}
static void dma_callback(void *data)
{
struct pl022 *pl022 = data;
struct spi_message *msg = pl022->cur_msg;
BUG_ON(!pl022->sgt_rx.sgl);
#ifdef VERBOSE_DEBUG
/*
* Optionally dump out buffers to inspect contents, this is
* good if you want to convince yourself that the loopback
* read/write contents are the same, when adopting to a new
* DMA engine.
*/
{
struct scatterlist *sg;
unsigned int i;
dma_sync_sg_for_cpu(&pl022->adev->dev,
pl022->sgt_rx.sgl,
pl022->sgt_rx.nents,
DMA_FROM_DEVICE);
for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
print_hex_dump(KERN_ERR, "SPI RX: ",
DUMP_PREFIX_OFFSET,
16,
1,
sg_virt(sg),
sg_dma_len(sg),
1);
}
for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
print_hex_dump(KERN_ERR, "SPI TX: ",
DUMP_PREFIX_OFFSET,
16,
1,
sg_virt(sg),
sg_dma_len(sg),
1);
}
}
#endif
unmap_free_dma_scatter(pl022);
/* Update total bytes transferred */
msg->actual_length += pl022->cur_transfer->len;
if (pl022->cur_transfer->cs_change)
pl022->cur_chip->
cs_control(SSP_CHIP_DESELECT);
/* Move to next transfer */
msg->state = next_transfer(pl022);
tasklet_schedule(&pl022->pump_transfers);
}
static void setup_dma_scatter(struct pl022 *pl022,
void *buffer,
unsigned int length,
struct sg_table *sgtab)
{
struct scatterlist *sg;
int bytesleft = length;
void *bufp = buffer;
int mapbytes;
int i;
if (buffer) {
for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
/*
* If there are less bytes left than what fits
* in the current page (plus page alignment offset)
* we just feed in this, else we stuff in as much
* as we can.
*/
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
sg_set_page(sg, virt_to_page(bufp),
mapbytes, offset_in_page(bufp));
bufp += mapbytes;
bytesleft -= mapbytes;
dev_dbg(&pl022->adev->dev,
"set RX/TX target page @ %p, %d bytes, %d left\n",
bufp, mapbytes, bytesleft);
}
} else {
/* Map the dummy buffer on every page */
for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
if (bytesleft < PAGE_SIZE)
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE;
sg_set_page(sg, virt_to_page(pl022->dummypage),
mapbytes, 0);
bytesleft -= mapbytes;
dev_dbg(&pl022->adev->dev,
"set RX/TX to dummy page %d bytes, %d left\n",
mapbytes, bytesleft);
}
}
BUG_ON(bytesleft);
}
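/*
 * Worked example of the split above (illustrative, assuming PAGE_SIZE == 4096):
 * a 5000-byte buffer starting 100 bytes into a page becomes two sg entries,
 * 3996 bytes up to the page boundary and then the remaining 1004 bytes.
 */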
/**
* configure_dma - configures the channels for the next transfer
* @pl022: SSP driver's private data structure
*/
static int configure_dma(struct pl022 *pl022)
{
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
.direction = DMA_FROM_DEVICE,
.src_maxburst = pl022->vendor->fifodepth >> 1,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
.direction = DMA_TO_DEVICE,
.dst_maxburst = pl022->vendor->fifodepth >> 1,
};
unsigned int pages;
int ret;
int sglen;
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
switch (pl022->read) {
case READING_NULL:
/* Use the same as for writing */
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
break;
case READING_U8:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case READING_U16:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case READING_U32:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
switch (pl022->write) {
case WRITING_NULL:
/* Use the same as for reading */
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
break;
case WRITING_U8:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case WRITING_U16:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case WRITING_U32:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
/* SPI peculiarity: we need to read and write the same width */
if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
rx_conf.src_addr_width = tx_conf.dst_addr_width;
if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
tx_conf.dst_addr_width = rx_conf.src_addr_width;
BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
(unsigned long) &rx_conf);
txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
(unsigned long) &tx_conf);
/* Create sglists for the transfers */
pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL);
if (ret)
goto err_alloc_rx_sg;
ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL);
if (ret)
goto err_alloc_tx_sg;
/* Fill in the scatterlists for the RX+TX buffers */
setup_dma_scatter(pl022, pl022->rx,
pl022->cur_transfer->len, &pl022->sgt_rx);
setup_dma_scatter(pl022, pl022->tx,
pl022->cur_transfer->len, &pl022->sgt_tx);
/* Map DMA buffers */
sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
pl022->sgt_rx.nents, DMA_FROM_DEVICE);
if (!sglen)
goto err_rx_sgmap;
sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
if (!sglen)
goto err_tx_sgmap;
/* Send both scatterlists */
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
pl022->sgt_rx.nents,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_rxdesc;
txdesc = txchan->device->device_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
pl022->sgt_tx.nents,
DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_txdesc;
/* Put the callback on the RX transfer only, that should finish last */
rxdesc->callback = dma_callback;
rxdesc->callback_param = pl022;
/* Submit and fire RX and TX with TX last so we're ready to read! */
cookie = rxdesc->tx_submit(rxdesc);
if (dma_submit_error(cookie))
goto err_submit_rx;
cookie = txdesc->tx_submit(txdesc);
if (dma_submit_error(cookie))
goto err_submit_tx;
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
return 0;
err_submit_tx:
err_submit_rx:
err_txdesc:
txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
err_rxdesc:
rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
pl022->sgt_tx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
return -ENOMEM;
}
static int __init pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/*
* We need both RX and TX channels to do DMA, else do none
* of them.
*/
pl022->dma_rx_channel = dma_request_channel(mask,
pl022->master_info->dma_filter,
pl022->master_info->dma_rx_param);
if (!pl022->dma_rx_channel) {
dev_err(&pl022->adev->dev, "no RX DMA channel!\n");
goto err_no_rxchan;
}
pl022->dma_tx_channel = dma_request_channel(mask,
pl022->master_info->dma_filter,
pl022->master_info->dma_tx_param);
if (!pl022->dma_tx_channel) {
dev_err(&pl022->adev->dev, "no TX DMA channel!\n");
goto err_no_txchan;
}
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pl022->dummypage) {
dev_err(&pl022->adev->dev, "no DMA dummypage!\n");
goto err_no_dummypage;
}
dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
dma_chan_name(pl022->dma_rx_channel),
dma_chan_name(pl022->dma_tx_channel));
return 0;
err_no_dummypage:
dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
dma_release_channel(pl022->dma_rx_channel);
pl022->dma_rx_channel = NULL;
err_no_rxchan:
return -ENODEV;
}
static void terminate_dma(struct pl022 *pl022)
{
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
unmap_free_dma_scatter(pl022);
}
static void pl022_dma_remove(struct pl022 *pl022)
{
if (pl022->busy)
terminate_dma(pl022);
if (pl022->dma_tx_channel)
dma_release_channel(pl022->dma_tx_channel);
if (pl022->dma_rx_channel)
dma_release_channel(pl022->dma_rx_channel);
kfree(pl022->dummypage);
}
#else
static inline int configure_dma(struct pl022 *pl022)
{
return -ENODEV;
}
static inline int pl022_dma_probe(struct pl022 *pl022)
{
return 0;
}
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif
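The probe path above only tries DMA when the board's pl022 platform data asks for it and supplies a dmaengine filter. A minimal sketch of such platform data, assuming the platform-data type is struct pl022_ssp_controller and showing only the fields pl022_dma_probe() actually dereferences (the filter name and request cookies are made up for illustration):

static bool board_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* match 'chan' against the request line described by filter_param */
	return false;	/* placeholder: a real filter returns true for the wanted channel */
}

static struct pl022_ssp_controller ssp0_plat_data = {
	.enable_dma   = 1,
	.dma_filter   = board_spi_dma_filter,
	.dma_rx_param = (void *)"ssp0_rx",	/* illustrative request cookies */
	.dma_tx_param = (void *)"ssp0_tx",
};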
/**
* pl022_interrupt_handler - Interrupt handler for SSP controller
*
......@@ -794,14 +1169,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
if (unlikely(!irq_status))
return IRQ_NONE;
/* This handles the error code interrupts */
/*
* This handles the FIFO interrupts, the timeout
* interrupts are flatly ignored, they cannot be
* trusted.
*/
if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
/*
* Overrun interrupt - bail out since our Data has been
* corrupted
*/
dev_err(&pl022->adev->dev,
"FIFO overrun\n");
dev_err(&pl022->adev->dev, "FIFO overrun\n");
if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
dev_err(&pl022->adev->dev,
"RXFIFO is full\n");
......@@ -896,8 +1274,8 @@ static int set_up_next_transfer(struct pl022 *pl022,
}
/**
* pump_transfers - Tasklet function which schedules next interrupt transfer
* when running in interrupt transfer mode.
* pump_transfers - Tasklet function which schedules next transfer
* when running in interrupt or DMA transfer mode.
* @data: SSP driver private data structure
*
*/
......@@ -954,65 +1332,23 @@ static void pump_transfers(unsigned long data)
}
/* Flush the FIFOs and let's go! */
flush(pl022);
writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
}
/**
* NOT IMPLEMENTED
* configure_dma - It configures the DMA pipes for DMA transfers
* @data: SSP driver's private data structure
*
*/
static int configure_dma(void *data)
{
struct pl022 *pl022 = data;
dev_dbg(&pl022->adev->dev, "configure DMA\n");
return -ENOTSUPP;
}
/**
* do_dma_transfer - It handles transfers of the current message
* if it is DMA xfer.
* NOT FULLY IMPLEMENTED
* @data: SSP driver's private data structure
*/
static void do_dma_transfer(void *data)
{
struct pl022 *pl022 = data;
if (configure_dma(data)) {
dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
if (pl022->cur_chip->enable_dma) {
if (configure_dma(pl022)) {
dev_dbg(&pl022->adev->dev,
"configuration of DMA failed, fall back to interrupt mode\n");
goto err_config_dma;
}
/* TODO: Implememt DMA setup of pipes here */
/* Enable target chip, set up transfer */
pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
/* Error path */
pl022->cur_msg->state = STATE_ERROR;
pl022->cur_msg->status = -EIO;
giveback(pl022);
return;
}
/* Enable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
/* TODO: Enable the DMA transfer here */
return;
err_config_dma:
pl022->cur_msg->state = STATE_ERROR;
pl022->cur_msg->status = -EIO;
giveback(pl022);
return;
err_config_dma:
writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_transfer(void *data)
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
struct pl022 *pl022 = data;
u32 irqflags = ENABLE_ALL_INTERRUPTS;
/* Enable target chip */
pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
......@@ -1023,15 +1359,26 @@ static void do_interrupt_transfer(void *data)
giveback(pl022);
return;
}
/* If we're using DMA, set up DMA here */
if (pl022->cur_chip->enable_dma) {
/* Configure DMA transfer */
if (configure_dma(pl022)) {
dev_dbg(&pl022->adev->dev,
"configuration of DMA failed, fall back to interrupt mode\n");
goto err_config_dma;
}
/* Disable interrupts in DMA mode, IRQ from DMA controller */
irqflags = DISABLE_ALL_INTERRUPTS;
}
err_config_dma:
/* Enable SSP, turn on interrupts */
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
writew(irqflags, SSP_IMSC(pl022->virtbase));
}
static void do_polling_transfer(void *data)
static void do_polling_transfer(struct pl022 *pl022)
{
struct pl022 *pl022 = data;
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
......@@ -1101,7 +1448,7 @@ static void do_polling_transfer(void *data)
*
* This function checks if there is any spi message in the queue that
* needs processing and delegate control to appropriate function
* do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
* do_polling_transfer()/do_interrupt_dma_transfer()
* based on the kind of the transfer
*
*/
......@@ -1150,10 +1497,8 @@ static void pump_messages(struct work_struct *work)
if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
do_polling_transfer(pl022);
else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
do_interrupt_transfer(pl022);
else
do_dma_transfer(pl022);
do_interrupt_dma_transfer(pl022);
}
......@@ -1248,100 +1593,56 @@ static int destroy_queue(struct pl022 *pl022)
}
static int verify_controller_parameters(struct pl022 *pl022,
struct pl022_config_chip *chip_info)
struct pl022_config_chip const *chip_info)
{
if ((chip_info->lbm != LOOPBACK_ENABLED)
&& (chip_info->lbm != LOOPBACK_DISABLED)) {
dev_err(chip_info->dev,
"loopback Mode is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
|| (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"interface is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
(!pl022->vendor->unidir)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"unidirectional mode not supported in this "
"hardware version\n");
return -EINVAL;
}
if ((chip_info->hierarchy != SSP_MASTER)
&& (chip_info->hierarchy != SSP_SLAVE)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"hierarchy is configured incorrectly\n");
return -EINVAL;
}
if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
|| ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
dev_err(chip_info->dev,
"cpsdvsr is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->endian_rx != SSP_RX_MSB)
&& (chip_info->endian_rx != SSP_RX_LSB)) {
dev_err(chip_info->dev,
"RX FIFO endianess is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->endian_tx != SSP_TX_MSB)
&& (chip_info->endian_tx != SSP_TX_LSB)) {
dev_err(chip_info->dev,
"TX FIFO endianess is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->data_size < SSP_DATA_BITS_4)
|| (chip_info->data_size > SSP_DATA_BITS_32)) {
dev_err(chip_info->dev,
"DATA Size is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->com_mode != INTERRUPT_TRANSFER)
&& (chip_info->com_mode != DMA_TRANSFER)
&& (chip_info->com_mode != POLLING_TRANSFER)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"Communication mode is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
|| (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
|| (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
&& (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
dev_err(chip_info->dev,
"Clock Phase is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
&& (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
dev_err(chip_info->dev,
"Clock Polarity is configured incorrectly\n");
return -EINVAL;
}
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
|| (chip_info->ctrl_len > SSP_BITS_32)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"CTRL LEN is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
&& (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"Wait State is configured incorrectly\n");
return -EINVAL;
}
......@@ -1350,24 +1651,20 @@ static int verify_controller_parameters(struct pl022 *pl022,
if ((chip_info->duplex !=
SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
&& (chip_info->duplex !=
SSP_MICROWIRE_CHANNEL_HALF_DUPLEX))
dev_err(chip_info->dev,
SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
dev_err(&pl022->adev->dev,
"Microwire duplex mode is configured incorrectly\n");
return -EINVAL;
}
} else {
if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
dev_err(chip_info->dev,
dev_err(&pl022->adev->dev,
"Microwire half duplex mode requested,"
" but this is only available in the"
" ST version of PL022\n");
return -EINVAL;
}
}
if (chip_info->cs_control == NULL) {
dev_warn(chip_info->dev,
"Chip Select Function is NULL for this chip\n");
chip_info->cs_control = null_cs_control;
}
return 0;
}
......@@ -1467,22 +1764,24 @@ static int calculate_effective_freq(struct pl022 *pl022,
return 0;
}
/**
* NOT IMPLEMENTED
* process_dma_info - Processes the DMA info provided by client drivers
* @chip_info: chip info provided by client device
* @chip: Runtime state maintained by the SSP controller for each spi device
*
* This function processes and stores DMA config provided by client driver
* into the runtime state maintained by the SSP controller driver
*/
static int process_dma_info(struct pl022_config_chip *chip_info,
struct chip_data *chip)
{
dev_err(chip_info->dev,
"cannot process DMA info, DMA not implemented!\n");
return -ENOTSUPP;
}
/*
* A piece of default chip info unless the platform
* supplies it.
*/
static const struct pl022_config_chip pl022_default_chip_info = {
.com_mode = POLLING_TRANSFER,
.iface = SSP_INTERFACE_MOTOROLA_SPI,
.hierarchy = SSP_SLAVE,
.slave_tx_disable = DO_NOT_DRIVE_TX,
.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
.ctrl_len = SSP_BITS_8,
.wait_state = SSP_MWIRE_WAIT_ZERO,
.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
.cs_control = null_cs_control,
};
/**
* pl022_setup - setup function registered to SPI master framework
......@@ -1496,23 +1795,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info,
* controller hardware here, that is not done until the actual transfer
* commence.
*/
/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_LOOP)
static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip *chip_info;
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
struct ssp_clock_params clk_freq;
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
if (spi->mode & ~MODEBITS) {
dev_dbg(&spi->dev, "unsupported mode bits %x\n",
spi->mode & ~MODEBITS);
return -EINVAL;
}
unsigned int bits = spi->bits_per_word;
u32 tmp;
if (!spi->max_speed_hz)
return -EINVAL;
......@@ -1535,48 +1826,13 @@ static int pl022_setup(struct spi_device *spi)
chip_info = spi->controller_data;
if (chip_info == NULL) {
chip_info = &pl022_default_chip_info;
/* spi_board_info.controller_data is not supplied */
dev_dbg(&spi->dev,
"using default controller_data settings\n");
chip_info =
kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
if (!chip_info) {
dev_err(&spi->dev,
"cannot allocate controller data\n");
status = -ENOMEM;
goto err_first_setup;
}
dev_dbg(&spi->dev, "allocated memory for controller data\n");
/* Pointer back to the SPI device */
chip_info->dev = &spi->dev;
/*
* Set controller data default values:
* Polling is supported by default
*/
chip_info->lbm = LOOPBACK_DISABLED;
chip_info->com_mode = POLLING_TRANSFER;
chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
chip_info->hierarchy = SSP_SLAVE;
chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
chip_info->endian_tx = SSP_TX_LSB;
chip_info->endian_rx = SSP_RX_LSB;
chip_info->data_size = SSP_DATA_BITS_12;
chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
chip_info->ctrl_len = SSP_BITS_8;
chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
chip_info->cs_control = null_cs_control;
} else {
} else
dev_dbg(&spi->dev,
"using user supplied controller_data settings\n");
}
/*
* We can override with custom divisors, else we use the board
......@@ -1586,29 +1842,48 @@ static int pl022_setup(struct spi_device *spi)
&& (0 == chip_info->clk_freq.scr)) {
status = calculate_effective_freq(pl022,
spi->max_speed_hz,
&chip_info->clk_freq);
&clk_freq);
if (status < 0)
goto err_config_params;
} else {
if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
chip_info->clk_freq.cpsdvsr =
chip_info->clk_freq.cpsdvsr - 1;
memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
if ((clk_freq.cpsdvsr % 2) != 0)
clk_freq.cpsdvsr =
clk_freq.cpsdvsr - 1;
}
if ((clk_freq.cpsdvsr < CPSDVR_MIN)
|| (clk_freq.cpsdvsr > CPSDVR_MAX)) {
dev_err(&spi->dev,
"cpsdvsr is configured incorrectly\n");
goto err_config_params;
}
status = verify_controller_parameters(pl022, chip_info);
if (status) {
dev_err(&spi->dev, "controller data is incorrect");
goto err_config_params;
}
/* Now set controller state based on controller data */
chip->xfer_type = chip_info->com_mode;
if (!chip_info->cs_control) {
chip->cs_control = null_cs_control;
dev_warn(&spi->dev,
"chip select function is NULL for this chip\n");
} else
chip->cs_control = chip_info->cs_control;
if (chip_info->data_size <= 8) {
dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n");
if (bits <= 3) {
/* PL022 doesn't support less than 4-bits */
status = -ENOTSUPP;
goto err_config_params;
} else if (bits <= 8) {
dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
chip->n_bytes = 1;
chip->read = READING_U8;
chip->write = WRITING_U8;
} else if (chip_info->data_size <= 16) {
} else if (bits <= 16) {
dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
chip->n_bytes = 2;
chip->read = READING_U16;
......@@ -1625,6 +1900,7 @@ static int pl022_setup(struct spi_device *spi)
dev_err(&spi->dev,
"a standard pl022 can only handle "
"1 <= n <= 16 bit words\n");
status = -ENOTSUPP;
goto err_config_params;
}
}
......@@ -1636,9 +1912,8 @@ static int pl022_setup(struct spi_device *spi)
chip->cpsr = 0;
if ((chip_info->com_mode == DMA_TRANSFER)
&& ((pl022->master_info)->enable_dma)) {
chip->enable_dma = 1;
chip->enable_dma = true;
dev_dbg(&spi->dev, "DMA mode set in controller state\n");
status = process_dma_info(chip_info, chip);
if (status < 0)
goto err_config_params;
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
......@@ -1646,7 +1921,7 @@ static int pl022_setup(struct spi_device *spi)
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
SSP_DMACR_MASK_TXDMAE, 1);
} else {
chip->enable_dma = 0;
chip->enable_dma = false;
dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
SSP_DMACR_MASK_RXDMAE, 0);
......@@ -1654,10 +1929,12 @@ static int pl022_setup(struct spi_device *spi)
SSP_DMACR_MASK_TXDMAE, 1);
}
chip->cpsr = chip_info->clk_freq.cpsdvsr;
chip->cpsr = clk_freq.cpsdvsr;
/* Special setup for the ST micro extended control registers */
if (pl022->vendor->extended_cr) {
u32 etx;
if (pl022->vendor->pl023) {
/* These bits are only in the PL023 */
SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
......@@ -1673,29 +1950,51 @@ static int pl022_setup(struct spi_device *spi)
SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
SSP_CR1_MASK_MWAIT_ST, 6);
}
SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
SSP_WRITE_BITS(chip->cr0, bits - 1,
SSP_CR0_MASK_DSS_ST, 0);
SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx,
SSP_CR1_MASK_RENDN_ST, 4);
SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx,
SSP_CR1_MASK_TENDN_ST, 5);
if (spi->mode & SPI_LSB_FIRST) {
tmp = SSP_RX_LSB;
etx = SSP_TX_LSB;
} else {
tmp = SSP_RX_MSB;
etx = SSP_TX_MSB;
}
SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
SSP_CR1_MASK_RXIFLSEL_ST, 7);
SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
SSP_CR1_MASK_TXIFLSEL_ST, 10);
} else {
SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
SSP_WRITE_BITS(chip->cr0, bits - 1,
SSP_CR0_MASK_DSS, 0);
SSP_WRITE_BITS(chip->cr0, chip_info->iface,
SSP_CR0_MASK_FRF, 4);
}
/* Stuff that is common for all versions */
SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
if (spi->mode & SPI_CPOL)
tmp = SSP_CLK_POL_IDLE_HIGH;
else
tmp = SSP_CLK_POL_IDLE_LOW;
SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
if (spi->mode & SPI_CPHA)
tmp = SSP_CLK_SECOND_EDGE;
else
tmp = SSP_CLK_FIRST_EDGE;
SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
/* Loopback is available on all versions except PL023 */
if (!pl022->vendor->pl023)
SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
if (!pl022->vendor->pl023) {
if (spi->mode & SPI_LOOP)
tmp = LOOPBACK_ENABLED;
else
tmp = LOOPBACK_DISABLED;
SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
}
SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
......@@ -1704,7 +2003,7 @@ static int pl022_setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
return status;
err_config_params:
err_first_setup:
spi_set_ctldata(spi, NULL);
kfree(chip);
return status;
}
......@@ -1766,12 +2065,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
master->setup = pl022_setup;
master->transfer = pl022_transfer;
/*
* Supports mode 0-3, loopback, and active low CS. Transfers are
* always MS bit first on the original pl022.
*/
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
if (pl022->vendor->extended_cr)
master->mode_bits |= SPI_LSB_FIRST;
dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
status = amba_request_regions(adev, NULL);
if (status)
goto err_no_ioregion;
pl022->phybase = adev->res.start;
pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
if (pl022->virtbase == NULL) {
status = -ENOMEM;
......@@ -1798,6 +2106,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
goto err_no_irq;
}
/* Get DMA channels */
if (platform_info->enable_dma) {
status = pl022_dma_probe(pl022);
if (status != 0)
goto err_no_dma;
}
/* Initialize and start queue */
status = init_queue(pl022);
if (status != 0) {
......@@ -1826,6 +2142,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
err_start_queue:
err_init_queue:
destroy_queue(pl022);
pl022_dma_remove(pl022);
err_no_dma:
free_irq(adev->irq[0], pl022);
err_no_irq:
clk_put(pl022->clk);
......@@ -1856,6 +2174,7 @@ pl022_remove(struct amba_device *adev)
return status;
}
load_ssp_default_config(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
clk_disable(pl022->clk);
clk_put(pl022->clk);
......
......@@ -654,6 +654,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
struct spi_transfer *xfer;
unsigned long flags;
struct device *controller = spi->master->dev.parent;
u8 bits;
struct atmel_spi_device *asd;
as = spi_master_get_devdata(spi->master);
......@@ -672,8 +674,18 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
return -EINVAL;
}
if (xfer->bits_per_word) {
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
dev_dbg(&spi->dev, "you can't yet change "
"bits_per_word in transfers\n");
return -ENOPROTOOPT;
}
}
/* FIXME implement these protocol options!! */
if (xfer->bits_per_word || xfer->speed_hz) {
if (xfer->speed_hz) {
dev_dbg(&spi->dev, "no protocol options yet\n");
return -ENOPROTOOPT;
}
......
......@@ -296,6 +296,19 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
return 0;
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
while (!(__raw_readl(reg) & bit)) {
if (time_after(jiffies, timeout))
return -1;
cpu_relax();
}
return 0;
}
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
......@@ -309,11 +322,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
u32 l;
u8 * rx;
const u8 * tx;
void __iomem *chstat_reg;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
l = mcspi_cached_chconf0(spi);
chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
count = xfer->len;
c = count;
word_len = cs->word_len;
......@@ -382,6 +398,16 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion);
dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
/* for TX_ONLY mode, be sure all words have shifted out */
if (rx == NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS) < 0)
dev_err(&spi->dev, "TXS timed out\n");
else if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_EOT) < 0)
dev_err(&spi->dev, "EOT timed out\n");
}
}
if (rx != NULL) {
......@@ -435,19 +461,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
return count;
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
while (!(__raw_readl(reg) & bit)) {
if (time_after(jiffies, timeout))
return -1;
cpu_relax();
}
return 0;
}
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
......@@ -489,10 +502,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
#ifdef VERBOSE
dev_dbg(&spi->dev, "write-%d %02x\n",
dev_vdbg(&spi->dev, "write-%d %02x\n",
word_len, *tx);
#endif
__raw_writel(*tx++, tx_reg);
}
if (rx != NULL) {
......@@ -506,10 +517,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %02x\n",
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
#endif
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
......@@ -522,10 +531,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %02x\n",
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
#endif
}
} while (c);
} else if (word_len <= 16) {
......@@ -542,10 +549,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
#ifdef VERBOSE
dev_dbg(&spi->dev, "write-%d %04x\n",
dev_vdbg(&spi->dev, "write-%d %04x\n",
word_len, *tx);
#endif
__raw_writel(*tx++, tx_reg);
}
if (rx != NULL) {
......@@ -559,10 +564,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %04x\n",
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
#endif
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
......@@ -575,10 +578,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %04x\n",
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
#endif
}
} while (c);
} else if (word_len <= 32) {
......@@ -595,10 +596,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
#ifdef VERBOSE
dev_dbg(&spi->dev, "write-%d %08x\n",
dev_vdbg(&spi->dev, "write-%d %08x\n",
word_len, *tx);
#endif
__raw_writel(*tx++, tx_reg);
}
if (rx != NULL) {
......@@ -612,10 +611,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %08x\n",
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
#endif
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
......@@ -628,10 +625,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %08x\n",
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
#endif
}
} while (c);
}
......@@ -644,6 +639,12 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
} else if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_EOT) < 0)
dev_err(&spi->dev, "EOT timed out\n");
/* disable the channel to purge RX data received during a TX_ONLY
 * transfer, otherwise this stale RX data would corrupt the directly
 * following RX_ONLY transfer.
 */
omap2_mcspi_set_enable(spi, 0);
}
out:
omap2_mcspi_set_enable(spi, 1);
......
......@@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
goto msg_rejected;
}
if ((t != NULL) && t->bits_per_word)
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
if ((bits_per_word != 8) && (bits_per_word != 16)) {
......@@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
goto msg_rejected;
}
/*make sure buffer length is even when working in 16 bit mode*/
if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) {
if ((t->bits_per_word == 16) && (t->len & 1)) {
dev_err(&spi->dev,
"message rejected : "
"odd data length (%d) while in 16 bit mode\n",
......
/*
* Blackfin On-Chip SPI Driver
*
* Copyright 2004-2007 Analog Devices Inc.
* Copyright 2004-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
......@@ -41,13 +41,16 @@ MODULE_LICENSE("GPL");
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1
/* Value to send if no TX value is supplied */
#define SPI_IDLE_TXVAL 0x0000
struct bfin_spi_master_data;
struct driver_data {
struct bfin_spi_transfer_ops {
void (*write) (struct bfin_spi_master_data *);
void (*read) (struct bfin_spi_master_data *);
void (*duplex) (struct bfin_spi_master_data *);
};
struct bfin_spi_master_data {
/* Driver model hookup */
struct platform_device *pdev;
......@@ -69,7 +72,7 @@ struct driver_data {
spinlock_t lock;
struct list_head queue;
int busy;
int run;
bool running;
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
......@@ -77,7 +80,7 @@ struct driver_data {
/* Current message transfer state info */
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
struct bfin_spi_slave_data *cur_chip;
size_t len_in_bytes;
size_t len;
void *tx;
......@@ -92,38 +95,37 @@ struct driver_data {
dma_addr_t rx_dma;
dma_addr_t tx_dma;
int irq_requested;
int spi_irq;
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes;
u16 ctrl_reg;
u16 flag_reg;
int cs_change;
void (*write) (struct driver_data *);
void (*read) (struct driver_data *);
void (*duplex) (struct driver_data *);
const struct bfin_spi_transfer_ops *ops;
};
struct chip_data {
struct bfin_spi_slave_data {
u16 ctl_reg;
u16 baud;
u16 flag;
u8 chip_select_num;
u8 n_bytes;
u8 width; /* 0 or 1 */
u8 enable_dma;
u8 bits_per_word; /* 8 or 16 */
u8 cs_change_per_word;
u16 cs_chg_udelay; /* Some devices require > 255usec delay */
u32 cs_gpio;
u16 idle_tx_val;
void (*write) (struct driver_data *);
void (*read) (struct driver_data *);
void (*duplex) (struct driver_data *);
u8 pio_interrupt; /* use spi data irq */
const struct bfin_spi_transfer_ops *ops;
};
#define DEFINE_SPI_REG(reg, off) \
static inline u16 read_##reg(struct driver_data *drv_data) \
static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \
{ return bfin_read16(drv_data->regs_base + off); } \
static inline void write_##reg(struct driver_data *drv_data, u16 v) \
static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \
{ bfin_write16(drv_data->regs_base + off, v); }
DEFINE_SPI_REG(CTRL, 0x00)
......@@ -134,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10)
DEFINE_SPI_REG(BAUD, 0x14)
DEFINE_SPI_REG(SHAW, 0x18)
static void bfin_spi_enable(struct driver_data *drv_data)
static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
{
u16 cr;
......@@ -142,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data)
write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
}
static void bfin_spi_disable(struct driver_data *drv_data)
static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
{
u16 cr;
......@@ -165,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz)
return spi_baud;
}
static int bfin_spi_flush(struct driver_data *drv_data)
static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
......@@ -179,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data)
}
/* Chip select operation functions for cs_change flag */
static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip)
static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
{
if (likely(chip->chip_select_num)) {
if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
u16 flag = read_FLAG(drv_data);
flag |= chip->flag;
flag &= ~(chip->flag << 8);
flag &= ~chip->flag;
write_FLAG(drv_data, flag);
} else {
......@@ -193,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c
}
}
static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
if (likely(chip->chip_select_num)) {
if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
u16 flag = read_FLAG(drv_data);
flag &= ~chip->flag;
flag |= (chip->flag << 8);
flag |= chip->flag;
write_FLAG(drv_data, flag);
} else {
......@@ -211,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data
udelay(chip->cs_chg_udelay);
}
/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
if (chip->chip_select_num < MAX_CTRL_CS) {
u16 flag = read_FLAG(drv_data);
flag |= (chip->flag >> 8);
write_FLAG(drv_data, flag);
}
}
static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
if (chip->chip_select_num < MAX_CTRL_CS) {
u16 flag = read_FLAG(drv_data);
flag &= ~(chip->flag >> 8);
write_FLAG(drv_data, flag);
}
}
/* stop controller and re-config current chip*/
static void bfin_spi_restore_state(struct driver_data *drv_data)
static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
/* Clear status and disable clock */
write_STAT(drv_data, BIT_STAT_CLR);
bfin_spi_disable(drv_data);
dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
SSYNC();
/* Load the registers */
write_CTRL(drv_data, chip->ctl_reg);
write_BAUD(drv_data, chip->baud);
......@@ -230,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data)
}
/* used to kick off transfer in rx mode and read unwanted RX data */
static inline void bfin_spi_dummy_read(struct driver_data *drv_data)
static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
{
(void) read_RDBR(drv_data);
}
static void bfin_spi_null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
u16 tx_val = drv_data->cur_chip->idle_tx_val;
/* clear RXS (we check for RXS inside the loop) */
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, tx_val);
drv_data->tx += n_bytes;
/* wait until transfer finished.
checking SPIF or TXS may not guarantee transfer completion */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
/* discard RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
}
}
static void bfin_spi_null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
u16 tx_val = drv_data->cur_chip->idle_tx_val;
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
write_TDBR(drv_data, tx_val);
drv_data->rx += n_bytes;
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
bfin_spi_dummy_read(drv_data);
}
}
static void bfin_spi_u8_writer(struct driver_data *drv_data)
static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
{
/* clear RXS (we check for RXS inside the loop) */
bfin_spi_dummy_read(drv_data);
......@@ -288,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data)
}
}
static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
/* clear RXS (we check for RXS inside the loop) */
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
/* make sure transfer finished before deactivating CS */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
bfin_spi_dummy_read(drv_data);
bfin_spi_cs_deactive(drv_data, chip);
}
}
static void bfin_spi_u8_reader(struct driver_data *drv_data)
static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
{
u16 tx_val = drv_data->cur_chip->idle_tx_val;
......@@ -321,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data)
}
}
static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
u16 tx_val = chip->idle_tx_val;
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, tx_val);
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
*(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
bfin_spi_cs_deactive(drv_data, chip);
}
}
static void bfin_spi_u8_duplex(struct driver_data *drv_data)
static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
{
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
......@@ -352,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data)
}
}
static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
*(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
bfin_spi_cs_deactive(drv_data, chip);
}
}
static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = {
.write = bfin_spi_u8_writer,
.read = bfin_spi_u8_reader,
.duplex = bfin_spi_u8_duplex,
};
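This ops table (and the u16 one further down) gathers the per-word-size helpers behind a single pointer, drv_data->ops. The struct itself lives in the Blackfin SPI header and is not part of this hunk; judging from the initializers it is presumably just three function pointers:

	struct bfin_spi_transfer_ops {
		void (*write)(struct bfin_spi_master_data *drv_data);
		void (*read)(struct bfin_spi_master_data *drv_data);
		void (*duplex)(struct bfin_spi_master_data *drv_data);
	};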
static void bfin_spi_u16_writer(struct driver_data *drv_data)
static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
{
/* clear RXS (we check for RXS inside the loop) */
bfin_spi_dummy_read(drv_data);
......@@ -386,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data)
}
}
static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
/* clear RXS (we check for RXS inside the loop) */
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
/* make sure transfer finished before deactiving CS */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
bfin_spi_dummy_read(drv_data);
bfin_spi_cs_deactive(drv_data, chip);
}
}
static void bfin_spi_u16_reader(struct driver_data *drv_data)
static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
{
u16 tx_val = drv_data->cur_chip->idle_tx_val;
......@@ -421,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data)
}
}
static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
u16 tx_val = chip->idle_tx_val;
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, tx_val);
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
*(u16 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += 2;
bfin_spi_cs_deactive(drv_data, chip);
}
}
static void bfin_spi_u16_duplex(struct driver_data *drv_data)
static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
{
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
......@@ -455,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data)
}
}
static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
bfin_spi_cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
*(u16 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += 2;
bfin_spi_cs_deactive(drv_data, chip);
}
}
static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = {
.write = bfin_spi_u16_writer,
.read = bfin_spi_u16_reader,
.duplex = bfin_spi_u16_duplex,
};
/* test if ther is more transfer to be done */
static void *bfin_spi_next_transfer(struct driver_data *drv_data)
/* test if there is more transfer to be done */
static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
struct spi_transfer *trans = drv_data->cur_transfer;
......@@ -494,9 +387,9 @@ static void *bfin_spi_next_transfer(struct driver_data *drv_data)
* caller already set message->status;
* dma and pio irqs are blocked give finished message back
*/
static void bfin_spi_giveback(struct driver_data *drv_data)
static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
......@@ -525,10 +418,83 @@ static void bfin_spi_giveback(struct driver_data *drv_data)
msg->complete(msg->context);
}
/* spi data irq handler */
static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
{
struct bfin_spi_master_data *drv_data = dev_id;
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
int n_bytes = drv_data->n_bytes;
/* wait until transfer finished. */
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
(drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) {
/* last read */
if (drv_data->rx) {
dev_dbg(&drv_data->pdev->dev, "last read\n");
if (n_bytes == 2)
*(u16 *) (drv_data->rx) = read_RDBR(drv_data);
else if (n_bytes == 1)
*(u8 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += n_bytes;
}
msg->actual_length += drv_data->len_in_bytes;
if (drv_data->cs_change)
bfin_spi_cs_deactive(drv_data, chip);
/* Move to next transfer */
msg->state = bfin_spi_next_transfer(drv_data);
disable_irq_nosync(drv_data->spi_irq);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
return IRQ_HANDLED;
}
if (drv_data->rx && drv_data->tx) {
/* duplex */
dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
if (drv_data->n_bytes == 2) {
*(u16 *) (drv_data->rx) = read_RDBR(drv_data);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
} else if (drv_data->n_bytes == 1) {
*(u8 *) (drv_data->rx) = read_RDBR(drv_data);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
}
} else if (drv_data->rx) {
/* read */
dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
if (drv_data->n_bytes == 2)
*(u16 *) (drv_data->rx) = read_RDBR(drv_data);
else if (drv_data->n_bytes == 1)
*(u8 *) (drv_data->rx) = read_RDBR(drv_data);
write_TDBR(drv_data, chip->idle_tx_val);
} else if (drv_data->tx) {
/* write */
dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
bfin_spi_dummy_read(drv_data);
if (drv_data->n_bytes == 2)
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
else if (drv_data->n_bytes == 1)
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
}
if (drv_data->tx)
drv_data->tx += n_bytes;
if (drv_data->rx)
drv_data->rx += n_bytes;
return IRQ_HANDLED;
}
static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
struct chip_data *chip = drv_data->cur_chip;
struct bfin_spi_master_data *drv_data = dev_id;
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
unsigned long timeout;
unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
......@@ -540,10 +506,6 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
clear_dma_irqstat(drv_data->dma_channel);
/* Wait for DMA to complete */
while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN)
cpu_relax();
/*
* wait for the last transaction shifted out. HRM states:
* at this point there may still be data in the SPI DMA FIFO waiting
......@@ -551,8 +513,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
* register until it goes low for 2 successive reads
*/
if (drv_data->tx != NULL) {
while ((read_STAT(drv_data) & TXS) ||
(read_STAT(drv_data) & TXS))
while ((read_STAT(drv_data) & BIT_STAT_TXS) ||
(read_STAT(drv_data) & BIT_STAT_TXS))
cpu_relax();
}
......@@ -561,14 +523,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
dmastat, read_STAT(drv_data));
timeout = jiffies + HZ;
while (!(read_STAT(drv_data) & SPIF))
while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
break;
} else
cpu_relax();
if ((dmastat & DMA_ERR) && (spistat & RBSY)) {
if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) {
msg->state = ERROR_STATE;
dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n");
} else {
......@@ -588,20 +550,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
dev_dbg(&drv_data->pdev->dev,
"disable dma channel irq%d\n",
drv_data->dma_channel);
dma_disable_irq(drv_data->dma_channel);
dma_disable_irq_nosync(drv_data->dma_channel);
return IRQ_HANDLED;
}
static void bfin_spi_pump_transfers(unsigned long data)
{
struct driver_data *drv_data = (struct driver_data *)data;
struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data;
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
u8 width;
u16 cr, dma_width, dma_config;
struct bfin_spi_slave_data *chip = NULL;
unsigned int bits_per_word;
u16 cr, cr_width, dma_width, dma_config;
u32 tranf_success = 1;
u8 full_duplex = 0;
......@@ -639,7 +601,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
udelay(previous->delay_usecs);
}
/* Setup the transfer state based on the type of transfer */
/* Flush any existing transfers that may be sitting in the hardware */
if (bfin_spi_flush(drv_data) == 0) {
dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
message->status = -EIO;
......@@ -679,52 +641,31 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
switch (transfer->bits_per_word) {
case 8:
bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
if (bits_per_word == 8) {
drv_data->n_bytes = 1;
width = CFG_SPI_WORDSIZE8;
drv_data->read = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader;
drv_data->write = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer;
drv_data->duplex = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex;
break;
case 16:
drv_data->len = transfer->len;
cr_width = 0;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
} else if (bits_per_word == 16) {
drv_data->n_bytes = 2;
width = CFG_SPI_WORDSIZE16;
drv_data->read = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader;
drv_data->write = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer;
drv_data->duplex = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex;
break;
default:
/* No change, the same as default setting */
drv_data->n_bytes = chip->n_bytes;
width = chip->width;
drv_data->write = drv_data->tx ? chip->write : bfin_spi_null_writer;
drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader;
drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer;
break;
}
cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
cr |= (width << 8);
write_CTRL(drv_data, cr);
if (width == CFG_SPI_WORDSIZE16) {
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
} else {
drv_data->len = transfer->len;
dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n");
message->status = -EINVAL;
bfin_spi_giveback(drv_data);
return;
}
cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
cr |= cr_width;
write_CTRL(drv_data, cr);
dev_dbg(&drv_data->pdev->dev,
"transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
drv_data->write, chip->write, bfin_spi_null_writer);
"transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8);
/* speed and width has been set on per message */
message->state = RUNNING_STATE;
dma_config = 0;
......@@ -735,13 +676,11 @@ static void bfin_spi_pump_transfers(unsigned long data)
write_BAUD(drv_data, chip->baud);
write_STAT(drv_data, BIT_STAT_CLR);
cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
if (drv_data->cs_change)
bfin_spi_cs_active(drv_data, chip);
dev_dbg(&drv_data->pdev->dev,
"now pumping a transfer: width is %d, len is %d\n",
width, transfer->len);
cr_width, transfer->len);
/*
* Try to map dma buffer and do a dma transfer. If successful use,
......@@ -760,7 +699,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* config dma channel */
dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
set_dma_x_count(drv_data->dma_channel, drv_data->len);
if (width == CFG_SPI_WORDSIZE16) {
if (cr_width == BIT_CTL_WORDSIZE) {
set_dma_x_modify(drv_data->dma_channel, 2);
dma_width = WDSIZE_16;
} else {
......@@ -846,14 +785,41 @@ static void bfin_spi_pump_transfers(unsigned long data)
dma_enable_irq(drv_data->dma_channel);
local_irq_restore(flags);
} else {
/* IO mode write then read */
dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
return;
}
/*
* We always use SPI_WRITE mode (transfer starts with TDBR write).
* SPI_READ mode (transfer starts with RDBR read) seems to have
* problems with setting up the output value in TDBR prior to the
* start of the transfer.
*/
write_CTRL(drv_data, cr | BIT_CTL_TXMOD);
/* we always use SPI_WRITE mode. SPI_READ mode
seems to have problems with setting up the
output value in TDBR prior to the transfer. */
write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
if (chip->pio_interrupt) {
/* SPI irq should have been disabled by now */
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
/* start transfer */
if (drv_data->tx == NULL)
write_TDBR(drv_data, chip->idle_tx_val);
else {
if (bits_per_word == 8)
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
else
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
drv_data->tx += drv_data->n_bytes;
}
/* once TDBR is empty, interrupt is triggered */
enable_irq(drv_data->spi_irq);
return;
}
/* IO mode */
dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
if (full_duplex) {
/* full duplex mode */
......@@ -862,7 +828,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
dev_dbg(&drv_data->pdev->dev,
"IO duplex: cr is 0x%x\n", cr);
drv_data->duplex(drv_data);
drv_data->ops->duplex(drv_data);
if (drv_data->tx != drv_data->tx_end)
tranf_success = 0;
......@@ -871,7 +837,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
dev_dbg(&drv_data->pdev->dev,
"IO write: cr is 0x%x\n", cr);
drv_data->write(drv_data);
drv_data->ops->write(drv_data);
if (drv_data->tx != drv_data->tx_end)
tranf_success = 0;
......@@ -880,7 +846,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
dev_dbg(&drv_data->pdev->dev,
"IO read: cr is 0x%x\n", cr);
drv_data->read(drv_data);
drv_data->ops->read(drv_data);
if (drv_data->rx != drv_data->rx_end)
tranf_success = 0;
}
......@@ -897,22 +863,22 @@ static void bfin_spi_pump_transfers(unsigned long data)
if (drv_data->cs_change)
bfin_spi_cs_deactive(drv_data, chip);
}
/* Schedule next transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
}
}
/* pop a msg from queue and kick off real transfer */
static void bfin_spi_pump_messages(struct work_struct *work)
{
struct driver_data *drv_data;
struct bfin_spi_master_data *drv_data;
unsigned long flags;
drv_data = container_of(work, struct driver_data, pump_messages);
drv_data = container_of(work, struct bfin_spi_master_data, pump_messages);
/* Lock queue and check for queue work */
spin_lock_irqsave(&drv_data->lock, flags);
if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
if (list_empty(&drv_data->queue) || !drv_data->running) {
/* pumper kicked off but no work to do */
drv_data->busy = 0;
spin_unlock_irqrestore(&drv_data->lock, flags);
......@@ -962,12 +928,12 @@ static void bfin_spi_pump_messages(struct work_struct *work)
*/
static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
if (drv_data->run == QUEUE_STOPPED) {
if (!drv_data->running) {
spin_unlock_irqrestore(&drv_data->lock, flags);
return -ESHUTDOWN;
}
......@@ -979,7 +945,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
dev_dbg(&spi->dev, "adding an msg in transfer() \n");
list_add_tail(&msg->queue, &drv_data->queue);
if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
if (drv_data->running && !drv_data->busy)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
......@@ -1003,147 +969,184 @@ static u16 ssel[][MAX_SPI_SSEL] = {
P_SPI2_SSEL6, P_SPI2_SSEL7},
};
/* first setup for new devices */
/* setup for devices (may be called multiple times -- not just first setup) */
static int bfin_spi_setup(struct spi_device *spi)
{
struct bfin5xx_spi_chip *chip_info = NULL;
struct chip_data *chip;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
int ret;
if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
return -EINVAL;
struct bfin5xx_spi_chip *chip_info;
struct bfin_spi_slave_data *chip = NULL;
struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
u16 bfin_ctl_reg;
int ret = -EINVAL;
/* Only alloc (or use chip_info) on first setup */
chip_info = NULL;
chip = spi_get_ctldata(spi);
if (chip == NULL) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip) {
dev_err(&spi->dev, "cannot allocate chip data\n");
ret = -ENOMEM;
goto error;
}
chip->enable_dma = 0;
chip_info = spi->controller_data;
}
/* Let people set non-standard bits directly */
bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO |
BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ;
/* chip_info isn't always needed */
if (chip_info) {
/* Make sure people stop trying to set fields via ctl_reg
* when they should actually be using common SPI framework.
* Currently we let through: WOM EMISO PSSE GM SZ TIMOD.
* Currently we let through: WOM EMISO PSSE GM SZ.
* Not sure if a user actually needs/uses any of these,
* but let's assume (for now) they do.
*/
if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) {
if (chip_info->ctl_reg & ~bfin_ctl_reg) {
dev_err(&spi->dev, "do not set bits in ctl_reg "
"that the SPI framework manages\n");
return -EINVAL;
goto error;
}
chip->enable_dma = chip_info->enable_dma != 0
&& drv_data->master_info->enable_dma;
chip->ctl_reg = chip_info->ctl_reg;
chip->bits_per_word = chip_info->bits_per_word;
chip->cs_change_per_word = chip_info->cs_change_per_word;
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
chip->cs_gpio = chip_info->cs_gpio;
chip->idle_tx_val = chip_info->idle_tx_val;
chip->pio_interrupt = chip_info->pio_interrupt;
spi->bits_per_word = chip_info->bits_per_word;
} else {
/* force a default base state */
chip->ctl_reg &= bfin_ctl_reg;
}
if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
dev_err(&spi->dev, "%d bits_per_word is not supported\n",
spi->bits_per_word);
goto error;
}
/* translate common spi framework into our register */
if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
dev_err(&spi->dev, "unsupported spi modes detected\n");
goto error;
}
if (spi->mode & SPI_CPOL)
chip->ctl_reg |= CPOL;
chip->ctl_reg |= BIT_CTL_CPOL;
if (spi->mode & SPI_CPHA)
chip->ctl_reg |= CPHA;
chip->ctl_reg |= BIT_CTL_CPHA;
if (spi->mode & SPI_LSB_FIRST)
chip->ctl_reg |= LSBF;
chip->ctl_reg |= BIT_CTL_LSBF;
/* we don't support running in slave mode (yet?) */
chip->ctl_reg |= MSTR;
chip->ctl_reg |= BIT_CTL_MASTER;
/*
* Notice: for blackfin, the speed_hz is the value of register
* SPI_BAUD, not the real baudrate
*/
chip->baud = hz_to_spi_baud(spi->max_speed_hz);
chip->chip_select_num = spi->chip_select;
if (chip->chip_select_num < MAX_CTRL_CS) {
if (!(spi->mode & SPI_CPHA))
dev_warn(&spi->dev, "Warning: SPI CPHA not set:"
" Slave Select not under software control!\n"
" See Documentation/blackfin/bfin-spi-notes.txt");
chip->flag = (1 << spi->chip_select) << 8;
} else
chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
if (chip->enable_dma && chip->pio_interrupt) {
dev_err(&spi->dev, "enable_dma is set, "
"do not set pio_interrupt\n");
goto error;
}
/*
* if any one SPI chip is registered and wants DMA, request the
* DMA channel for it
*/
if (chip->enable_dma && !drv_data->dma_requested) {
/* register dma irq handler */
if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) {
dev_dbg(&spi->dev,
ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA");
if (ret) {
dev_err(&spi->dev,
"Unable to request BlackFin SPI DMA channel\n");
return -ENODEV;
goto error;
}
if (set_dma_callback(drv_data->dma_channel,
bfin_spi_dma_irq_handler, drv_data) < 0) {
dev_dbg(&spi->dev, "Unable to set dma callback\n");
return -EPERM;
drv_data->dma_requested = 1;
ret = set_dma_callback(drv_data->dma_channel,
bfin_spi_dma_irq_handler, drv_data);
if (ret) {
dev_err(&spi->dev, "Unable to set dma callback\n");
goto error;
}
dma_disable_irq(drv_data->dma_channel);
drv_data->dma_requested = 1;
}
/*
* Notice: for blackfin, the speed_hz is the value of register
* SPI_BAUD, not the real baudrate
*/
chip->baud = hz_to_spi_baud(spi->max_speed_hz);
chip->flag = 1 << (spi->chip_select);
chip->chip_select_num = spi->chip_select;
if (chip->pio_interrupt && !drv_data->irq_requested) {
ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler,
IRQF_DISABLED, "BFIN_SPI", drv_data);
if (ret) {
dev_err(&spi->dev, "Unable to register spi IRQ\n");
goto error;
}
drv_data->irq_requested = 1;
/* we use write mode, spi irq has to be disabled here */
disable_irq(drv_data->spi_irq);
}
if (chip->chip_select_num == 0) {
if (chip->chip_select_num >= MAX_CTRL_CS) {
ret = gpio_request(chip->cs_gpio, spi->modalias);
if (ret) {
if (drv_data->dma_requested)
free_dma(drv_data->dma_channel);
return ret;
dev_err(&spi->dev, "gpio_request() error\n");
goto pin_error;
}
gpio_direction_output(chip->cs_gpio, 1);
}
switch (chip->bits_per_word) {
case 8:
chip->n_bytes = 1;
chip->width = CFG_SPI_WORDSIZE8;
chip->read = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader;
chip->write = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer;
chip->duplex = chip->cs_change_per_word ?
bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex;
break;
case 16:
chip->n_bytes = 2;
chip->width = CFG_SPI_WORDSIZE16;
chip->read = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader;
chip->write = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer;
chip->duplex = chip->cs_change_per_word ?
bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex;
break;
default:
dev_err(&spi->dev, "%d bits_per_word is not supported\n",
chip->bits_per_word);
if (chip_info)
kfree(chip);
return -ENODEV;
}
dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
spi->modalias, chip->width, chip->enable_dma);
spi->modalias, spi->bits_per_word, chip->enable_dma);
dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
chip->ctl_reg, chip->flag);
spi_set_ctldata(spi, chip);
dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
if ((chip->chip_select_num > 0)
&& (chip->chip_select_num <= spi->master->num_chipselect))
peripheral_request(ssel[spi->master->bus_num]
if (chip->chip_select_num < MAX_CTRL_CS) {
ret = peripheral_request(ssel[spi->master->bus_num]
[chip->chip_select_num-1], spi->modalias);
if (ret) {
dev_err(&spi->dev, "peripheral_request() error\n");
goto pin_error;
}
}
bfin_spi_cs_enable(drv_data, chip);
bfin_spi_cs_deactive(drv_data, chip);
return 0;
pin_error:
if (chip->chip_select_num >= MAX_CTRL_CS)
gpio_free(chip->cs_gpio);
else
peripheral_free(ssel[spi->master->bus_num]
[chip->chip_select_num - 1]);
error:
if (chip) {
if (drv_data->dma_requested)
free_dma(drv_data->dma_channel);
drv_data->dma_requested = 0;
kfree(chip);
/* prevent free 'chip' twice */
spi_set_ctldata(spi, NULL);
}
return ret;
}
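For reference, a hypothetical board-file snippet for the optional per-device tuning that bfin_spi_setup() accepts through spi->controller_data; the field names come from the chip_info copies above, and enable_dma/pio_interrupt are mutually exclusive, as the check above enforces:

	static struct bfin5xx_spi_chip example_chip_info = {
		.enable_dma	= 1,	/* use the SPI DMA channel for this device */
		.pio_interrupt	= 0,	/* must stay 0 while enable_dma is set */
		.cs_chg_udelay	= 10,	/* microseconds to wait after deasserting CS */
		.idle_tx_val	= 0xff,	/* value clocked out during RX-only transfers */
	};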
/*
......@@ -1152,28 +1155,30 @@ static int bfin_spi_setup(struct spi_device *spi)
*/
static void bfin_spi_cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
struct bfin_spi_slave_data *chip = spi_get_ctldata(spi);
struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
if (!chip)
return;
if ((chip->chip_select_num > 0)
&& (chip->chip_select_num <= spi->master->num_chipselect))
if (chip->chip_select_num < MAX_CTRL_CS) {
peripheral_free(ssel[spi->master->bus_num]
[chip->chip_select_num-1]);
if (chip->chip_select_num == 0)
bfin_spi_cs_disable(drv_data, chip);
} else
gpio_free(chip->cs_gpio);
kfree(chip);
/* prevent free 'chip' twice */
spi_set_ctldata(spi, NULL);
}
static inline int bfin_spi_init_queue(struct driver_data *drv_data)
static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
{
INIT_LIST_HEAD(&drv_data->queue);
spin_lock_init(&drv_data->lock);
drv_data->run = QUEUE_STOPPED;
drv_data->running = false;
drv_data->busy = 0;
/* init transfer tasklet */
......@@ -1190,18 +1195,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data)
return 0;
}
static inline int bfin_spi_start_queue(struct driver_data *drv_data)
static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
if (drv_data->running || drv_data->busy) {
spin_unlock_irqrestore(&drv_data->lock, flags);
return -EBUSY;
}
drv_data->run = QUEUE_RUNNING;
drv_data->running = true;
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
drv_data->cur_chip = NULL;
......@@ -1212,7 +1217,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data)
return 0;
}
static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
unsigned limit = 500;
......@@ -1226,7 +1231,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead
*/
drv_data->run = QUEUE_STOPPED;
drv_data->running = false;
while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
......@@ -1241,7 +1246,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data)
return status;
}
static inline int bfin_spi_destroy_queue(struct driver_data *drv_data)
static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
{
int status;
......@@ -1259,14 +1264,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
struct spi_master *master;
struct driver_data *drv_data = 0;
struct bfin_spi_master_data *drv_data;
struct resource *res;
int status = 0;
platform_info = dev->platform_data;
/* Allocate master with space for drv_data */
master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
master = spi_alloc_master(dev, sizeof(*drv_data));
if (!master) {
dev_err(&pdev->dev, "can not alloc spi_master\n");
return -ENOMEM;
......@@ -1302,11 +1307,19 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_ioremap;
}
drv_data->dma_channel = platform_get_irq(pdev, 0);
if (drv_data->dma_channel < 0) {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(dev, "No DMA channel specified\n");
status = -ENOENT;
goto out_error_no_dma_ch;
goto out_error_free_io;
}
drv_data->dma_channel = res->start;
drv_data->spi_irq = platform_get_irq(pdev, 0);
if (drv_data->spi_irq < 0) {
dev_err(dev, "No spi pio irq specified\n");
status = -ENOENT;
goto out_error_free_io;
}
/* Initial and start queue */
......@@ -1328,6 +1341,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_queue_alloc;
}
/* Reset SPI registers. If these registers were used by the boot loader,
* the sky may fall on your head if you enable the dma controller.
*/
write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
write_FLAG(drv_data, 0xFF00);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_master(master);
......@@ -1343,7 +1362,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
out_error_queue_alloc:
bfin_spi_destroy_queue(drv_data);
out_error_no_dma_ch:
out_error_free_io:
iounmap((void *) drv_data->regs_base);
out_error_ioremap:
out_error_get_res:
......@@ -1355,7 +1374,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
/* stop hardware and remove the driver */
static int __devexit bfin_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
if (!drv_data)
......@@ -1375,6 +1394,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev)
free_dma(drv_data->dma_channel);
}
if (drv_data->irq_requested) {
free_irq(drv_data->spi_irq, drv_data);
drv_data->irq_requested = 0;
}
/* Disconnect from the SPI framework */
spi_unregister_master(drv_data->master);
......@@ -1389,26 +1413,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
status = bfin_spi_stop_queue(drv_data);
if (status != 0)
return status;
/* stop hardware */
bfin_spi_disable(drv_data);
drv_data->ctrl_reg = read_CTRL(drv_data);
drv_data->flag_reg = read_FLAG(drv_data);
/*
* reset SPI_CTL and SPI_FLG registers
*/
write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
write_FLAG(drv_data, 0xFF00);
return 0;
}
static int bfin_spi_resume(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
/* Enable the SPI interface */
bfin_spi_enable(drv_data);
write_CTRL(drv_data, drv_data->ctrl_reg);
write_FLAG(drv_data, drv_data->flag_reg);
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
......@@ -1439,7 +1469,7 @@ static int __init bfin_spi_init(void)
{
return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe);
}
module_init(bfin_spi_init);
subsys_initcall(bfin_spi_init);
static void __exit bfin_spi_exit(void)
{
......
/*
* Freescale eSPI controller driver.
*
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
#include "spi_fsl_lib.h"
/* eSPI Controller registers */
struct fsl_espi_reg {
__be32 mode; /* 0x000 - eSPI mode register */
__be32 event; /* 0x004 - eSPI event register */
__be32 mask; /* 0x008 - eSPI mask register */
__be32 command; /* 0x00c - eSPI command register */
__be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/
__be32 receive; /* 0x014 - eSPI receive FIFO access register*/
u8 res[8]; /* 0x018 - 0x01c reserved */
__be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */
};
struct fsl_espi_transfer {
const void *tx_buf;
void *rx_buf;
unsigned len;
unsigned n_tx;
unsigned n_rx;
unsigned actual_length;
int status;
};
/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE (1 << 31)
#define SPMODE_LOOP (1 << 30)
#define SPMODE_TXTHR(x) ((x) << 8)
#define SPMODE_RXTHR(x) ((x) << 0)
/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH (1 << 31)
#define CSMODE_CP_BEGIN_EDGECLK (1 << 30)
#define CSMODE_REV (1 << 29)
#define CSMODE_DIV16 (1 << 28)
#define CSMODE_PM(x) ((x) << 24)
#define CSMODE_POL_1 (1 << 20)
#define CSMODE_LEN(x) ((x) << 16)
#define CSMODE_BEF(x) ((x) << 12)
#define CSMODE_AFT(x) ((x) << 8)
#define CSMODE_CG(x) ((x) << 3)
/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
| CSMODE_AFT(0) | CSMODE_CG(1))
/* SPIE register values */
#define SPIE_NE 0x00000200 /* Not empty */
#define SPIE_NF 0x00000100 /* Not full */
/* SPIM register values */
#define SPIM_NE 0x00000200 /* Not empty */
#define SPIM_NF 0x00000100 /* Not full */
#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F)
/* SPCOM register values */
#define SPCOM_CS(x) ((x) << 30)
#define SPCOM_TRANLEN(x) ((x) << 0)
#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
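As a worked example of the command-register encoding (hypothetical values): selecting chip select 1 for a 32-byte transaction yields

	u32 spcom = SPCOM_CS(1) | SPCOM_TRANLEN(32 - 1);	/* 0x40000000 | 0x1f = 0x4000001f */

which is exactly what fsl_espi_bufs() below writes, after checking the length against SPCOM_TRANLEN_MAX.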
static void fsl_espi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
struct fsl_espi_reg *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
__be32 __iomem *espi_mode = &reg_base->mode;
u32 tmp;
unsigned long flags;
/* Turn off IRQs locally to minimize time that SPI is disabled. */
local_irq_save(flags);
/* Turn off SPI unit prior changing mode */
tmp = mpc8xxx_spi_read_reg(espi_mode);
mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
mpc8xxx_spi_write_reg(mode, cs->hw_mode);
mpc8xxx_spi_write_reg(espi_mode, tmp);
local_irq_restore(flags);
}
static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
{
u32 data;
u16 data_h;
u16 data_l;
const u32 *tx = mpc8xxx_spi->tx;
if (!tx)
return 0;
data = *tx++ << mpc8xxx_spi->tx_shift;
data_l = data & 0xffff;
data_h = (data >> 16) & 0xffff;
swab16s(&data_l);
swab16s(&data_h);
data = data_h | data_l;
mpc8xxx_spi->tx = tx;
return data;
}
static int fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
int bits_per_word = 0;
u8 pm;
u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
}
/* spi_transfer level calls that work per-word */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
/* Make sure it's a bit width we support [4..16] */
if ((bits_per_word < 4) || (bits_per_word > 16))
return -EINVAL;
if (!hz)
hz = spi->max_speed_hz;
cs->rx_shift = 0;
cs->tx_shift = 0;
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
if (bits_per_word <= 8) {
cs->rx_shift = 8 - bits_per_word;
} else if (bits_per_word <= 16) {
cs->rx_shift = 16 - bits_per_word;
if (spi->mode & SPI_LSB_FIRST)
cs->get_tx = fsl_espi_tx_buf_lsb;
} else {
return -EINVAL;
}
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
bits_per_word = bits_per_word - 1;
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
cs->hw_mode |= CSMODE_LEN(bits_per_word);
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= CSMODE_DIV16;
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
"Will use %d Hz instead.\n", dev_name(&spi->dev),
hz, mpc8xxx_spi->spibrg / 1024);
if (pm > 16)
pm = 16;
} else {
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
}
if (pm)
pm--;
cs->hw_mode |= CSMODE_PM(pm);
fsl_espi_change_mode(spi);
return 0;
}
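The prescaler arithmetic above is easiest to sanity-check in isolation. A stand-alone sketch that mirrors the code path, using assumed clock numbers (spibrg = 500 MHz system clock, 1 MHz requested SPI rate):

	#include <stdio.h>

	int main(void)
	{
		unsigned int spibrg = 500000000, hz = 1000000;	/* assumed values */
		unsigned int pm;
		int div16 = 0;

		if (spibrg / hz > 64) {
			div16 = 1;				/* CSMODE_DIV16 */
			pm = (spibrg - 1) / (hz * 64) + 1;	/* 8 for these numbers */
			if (pm > 16)
				pm = 16;
		} else {
			pm = (spibrg - 1) / (hz * 4) + 1;
		}
		if (pm)
			pm--;
		printf("DIV16=%d PM=%u\n", div16, pm);		/* DIV16=1 PM=7 */
		return 0;
	}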
static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
unsigned int len)
{
u32 word;
struct fsl_espi_reg *reg_base = mspi->reg_base;
mspi->count = len;
/* enable rx ints */
mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
/* transmit word */
word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
return 0;
}
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
INIT_COMPLETION(mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
" beyond the SPCOM[TRANLEN] field\n", t->len);
return -EINVAL;
}
mpc8xxx_spi_write_reg(&reg_base->command,
(SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
if (ret)
return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
return mpc8xxx_spi->count;
}
static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
{
if (cmd[1] && cmd[2] && cmd[3]) {
cmd[1] = (u8)(addr >> 16);
cmd[2] = (u8)(addr >> 8);
cmd[3] = (u8)(addr >> 0);
}
}
static unsigned int fsl_espi_cmd2addr(u8 *cmd)
{
if (cmd[1] && cmd[2] && cmd[3])
return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;
return 0;
}
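The two helpers above treat bytes 1..3 of the command buffer as a 24-bit big-endian address, which is what lets fsl_espi_rw_trans() below re-issue the same command with an advanced address for each chunk. A stand-alone round-trip check with a hypothetical command buffer (the all-bytes-non-zero guard is left out of the sketch):

	#include <stdio.h>

	typedef unsigned char u8;

	static void addr2cmd(unsigned int addr, u8 *cmd)
	{
		cmd[1] = (u8)(addr >> 16);
		cmd[2] = (u8)(addr >> 8);
		cmd[3] = (u8)(addr >> 0);
	}

	static unsigned int cmd2addr(const u8 *cmd)
	{
		return cmd[1] << 16 | cmd[2] << 8 | cmd[3];
	}

	int main(void)
	{
		u8 cmd[4] = { 0x03, 0, 0, 0 };	/* e.g. a flash READ opcode plus address bytes */

		addr2cmd(0x123456, cmd);
		printf("0x%06x\n", cmd2addr(cmd));	/* prints 0x123456 */
		return 0;
	}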
static void fsl_espi_do_trans(struct spi_message *m,
struct fsl_espi_transfer *tr)
{
struct spi_device *spi = m->spi;
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct fsl_espi_transfer *espi_trans = tr;
struct spi_message message;
struct spi_transfer *t, *first, trans;
int status = 0;
spi_message_init(&message);
memset(&trans, 0, sizeof(trans));
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
espi_trans->status = -EINVAL;
dev_err(mspi->dev, "bits_per_word/speed_hz should be"
" same for the same SPI transfer\n");
return;
}
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
}
trans.len = espi_trans->len;
trans.tx_buf = espi_trans->tx_buf;
trans.rx_buf = espi_trans->rx_buf;
spi_message_add_tail(&trans, &message);
list_for_each_entry(t, &message.transfers, transfer_list) {
if (t->bits_per_word || t->speed_hz) {
status = -EINVAL;
status = fsl_espi_setup_transfer(spi, t);
if (status < 0)
break;
}
if (t->len)
status = fsl_espi_bufs(spi, t);
if (status) {
status = -EMSGSIZE;
break;
}
if (t->delay_usecs)
udelay(t->delay_usecs);
}
espi_trans->status = status;
fsl_espi_setup_transfer(spi, NULL);
}
static void fsl_espi_cmd_trans(struct spi_message *m,
struct fsl_espi_transfer *trans, u8 *rx_buff)
{
struct spi_transfer *t;
u8 *local_buf;
int i = 0;
struct fsl_espi_transfer *espi_trans = trans;
local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
if (!local_buf) {
espi_trans->status = -ENOMEM;
return;
}
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf) {
memcpy(local_buf + i, t->tx_buf, t->len);
i += t->len;
}
}
espi_trans->tx_buf = local_buf;
espi_trans->rx_buf = local_buf + espi_trans->n_tx;
fsl_espi_do_trans(m, espi_trans);
espi_trans->actual_length = espi_trans->len;
kfree(local_buf);
}
static void fsl_espi_rw_trans(struct spi_message *m,
struct fsl_espi_transfer *trans, u8 *rx_buff)
{
struct fsl_espi_transfer *espi_trans = trans;
unsigned int n_tx = espi_trans->n_tx;
unsigned int n_rx = espi_trans->n_rx;
struct spi_transfer *t;
u8 *local_buf;
u8 *rx_buf = rx_buff;
unsigned int trans_len;
unsigned int addr;
int i, pos, loop;
local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
if (!local_buf) {
espi_trans->status = -ENOMEM;
return;
}
for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
trans_len = n_rx - pos;
if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
trans_len = SPCOM_TRANLEN_MAX - n_tx;
i = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf) {
memcpy(local_buf + i, t->tx_buf, t->len);
i += t->len;
}
}
addr = fsl_espi_cmd2addr(local_buf);
addr += pos;
fsl_espi_addr2cmd(addr, local_buf);
espi_trans->n_tx = n_tx;
espi_trans->n_rx = trans_len;
espi_trans->len = trans_len + n_tx;
espi_trans->tx_buf = local_buf;
espi_trans->rx_buf = local_buf + n_tx;
fsl_espi_do_trans(m, espi_trans);
memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
if (loop > 0)
espi_trans->actual_length += espi_trans->len - n_tx;
else
espi_trans->actual_length += espi_trans->len;
}
kfree(local_buf);
}
static void fsl_espi_do_one_msg(struct spi_message *m)
{
struct spi_transfer *t;
u8 *rx_buf = NULL;
unsigned int n_tx = 0;
unsigned int n_rx = 0;
struct fsl_espi_transfer espi_trans;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf)
n_tx += t->len;
if (t->rx_buf) {
n_rx += t->len;
rx_buf = t->rx_buf;
}
}
espi_trans.n_tx = n_tx;
espi_trans.n_rx = n_rx;
espi_trans.len = n_tx + n_rx;
espi_trans.actual_length = 0;
espi_trans.status = 0;
if (!rx_buf)
fsl_espi_cmd_trans(m, &espi_trans, NULL);
else
fsl_espi_rw_trans(m, &espi_trans, rx_buf);
m->actual_length = espi_trans.actual_length;
m->status = espi_trans.status;
m->complete(m->context);
}
static int fsl_espi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_espi_reg *reg_base;
int retval;
u32 hw_mode;
u32 loop_mode;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
if (!spi->max_speed_hz)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
reg_base = mpc8xxx_spi->reg_base;
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(
&reg_base->csmode[spi->chip_select]);
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
| CSMODE_REV);
if (spi->mode & SPI_CPHA)
cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
if (spi->mode & SPI_CPOL)
cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
if (!(spi->mode & SPI_LSB_FIRST))
cs->hw_mode |= CSMODE_REV;
/* Handle the loop mode */
loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
loop_mode &= ~SPMODE_LOOP;
if (spi->mode & SPI_LOOP)
loop_mode |= SPMODE_LOOP;
mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
retval = fsl_espi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
}
return 0;
}
void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
struct fsl_espi_reg *reg_base = mspi->reg_base;
/* We need to handle RX first */
if (events & SPIE_NE) {
u32 rx_data;
/* Spin until RX is done */
while (SPIE_RXCNT(events) < min(4, mspi->len)) {
cpu_relax();
events = mpc8xxx_spi_read_reg(&reg_base->event);
}
mspi->len -= 4;
rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
if (mspi->rx)
mspi->get_rx(rx_data, mspi);
}
if (!(events & SPIE_NF)) {
int ret;
/* spin until TX is done */
ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
&reg_base->event)) & SPIE_NF) == 0, 1000, 0);
if (!ret) {
dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
return;
}
}
/* Clear the events */
mpc8xxx_spi_write_reg(&reg_base->event, events);
mspi->count -= 1;
if (mspi->count) {
u32 word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
} else {
complete(&mspi->done);
}
}
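The byte/word bookkeeping here is a little subtle: mspi->len counts bytes (and may briefly go negative on the final, partially filled word) while mspi->count counts 32-bit FIFO words, since fsl_espi_bufs() rounds the length up to whole words. A stand-alone walk-through for a hypothetical 10-byte transfer:

	#include <stdio.h>

	int main(void)
	{
		int len = 10;				/* mspi->len, in bytes */
		unsigned int count = (len + 3) / 4;	/* mspi->count, in 32-bit words (3 here) */

		while (count) {
			/* fsl_espi_cpu_irq(): one RX FIFO word consumed per interrupt */
			len -= 4;
			count--;
			printf("after irq: len=%d, count=%u\n", len, count);
		}
		return 0;
	}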
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
struct mpc8xxx_spi *mspi = context_data;
struct fsl_espi_reg *reg_base = mspi->reg_base;
irqreturn_t ret = IRQ_NONE;
u32 events;
/* Get interrupt events(tx/rx) */
events = mpc8xxx_spi_read_reg(&reg_base->event);
if (events)
ret = IRQ_HANDLED;
dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
fsl_espi_cpu_irq(mspi, events);
return ret;
}
static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
{
iounmap(mspi->reg_base);
}
static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_espi_reg *reg_base;
u32 regval;
int i, ret = 0;
master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
if (!master) {
ret = -ENOMEM;
goto err;
}
dev_set_drvdata(dev, master);
ret = mpc8xxx_spi_probe(dev, mem, irq);
if (ret)
goto err_probe;
master->setup = fsl_espi_setup;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_espi_remove;
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
if (!mpc8xxx_spi->reg_base) {
ret = -ENOMEM;
goto err_probe;
}
reg_base = mpc8xxx_spi->reg_base;
/* Register for SPI Interrupt */
ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
0, "fsl_espi", mpc8xxx_spi);
if (ret)
goto free_irq;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
mpc8xxx_spi->rx_shift = 16;
mpc8xxx_spi->tx_shift = 24;
}
/* SPI controller initializations */
mpc8xxx_spi_write_reg(&reg_base->mode, 0);
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
mpc8xxx_spi_write_reg(&reg_base->command, 0);
mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
/* Init eSPI CS mode register */
for (i = 0; i < pdata->max_chipselect; i++)
mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
ret = spi_register_master(master);
if (ret < 0)
goto unreg_master;
dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
return master;
unreg_master:
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:
iounmap(mpc8xxx_spi->reg_base);
err_probe:
spi_master_put(master);
err:
return ERR_PTR(ret);
}
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev->platform_data;
const u32 *prop;
int len;
prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
if (!prop || len < sizeof(*prop)) {
dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
return -EINVAL;
}
pdata->max_chipselect = *prop;
pdata->cs_control = NULL;
return 0;
}
static int __devinit of_fsl_espi_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
struct resource irq;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev, ofid);
if (ret)
return ret;
ret = of_fsl_espi_get_chipselects(dev);
if (ret)
goto err;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
goto err;
ret = of_irq_to_resource(np, 0, &irq);
if (!ret) {
ret = -EINVAL;
goto err;
}
master = fsl_espi_probe(dev, &mem, irq.start);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
}
return 0;
err:
return ret;
}
static int __devexit of_fsl_espi_remove(struct platform_device *dev)
{
return mpc8xxx_spi_remove(&dev->dev);
}
static const struct of_device_id of_fsl_espi_match[] = {
{ .compatible = "fsl,mpc8536-espi" },
{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
static struct of_platform_driver fsl_espi_driver = {
.driver = {
.name = "fsl_espi",
.owner = THIS_MODULE,
.of_match_table = of_fsl_espi_match,
},
.probe = of_fsl_espi_probe,
.remove = __devexit_p(of_fsl_espi_remove),
};
static int __init fsl_espi_init(void)
{
return of_register_platform_driver(&fsl_espi_driver);
}
module_init(fsl_espi_init);
static void __exit fsl_espi_exit(void)
{
of_unregister_platform_driver(&fsl_espi_driver);
}
module_exit(fsl_espi_exit);
MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");
/*
* Freescale SPI/eSPI controller driver library.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/of_spi.h>
#include <sysdev/fsl_soc.h>
#include "spi_fsl_lib.h"
#define MPC8XXX_SPI_RX_BUF(type) \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
type *rx = mpc8xxx_spi->rx; \
*rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
mpc8xxx_spi->rx = rx; \
}
#define MPC8XXX_SPI_TX_BUF(type) \
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
u32 data; \
const type *tx = mpc8xxx_spi->tx; \
if (!tx) \
return 0; \
data = *tx++ << mpc8xxx_spi->tx_shift; \
mpc8xxx_spi->tx = tx; \
return data; \
}
MPC8XXX_SPI_RX_BUF(u8)
MPC8XXX_SPI_RX_BUF(u16)
MPC8XXX_SPI_RX_BUF(u32)
MPC8XXX_SPI_TX_BUF(u8)
MPC8XXX_SPI_TX_BUF(u16)
MPC8XXX_SPI_TX_BUF(u32)
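Each of the instantiations above stamps out one typed helper; for reference, MPC8XXX_SPI_RX_BUF(u8) expands to roughly:

	void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi)
	{
		u8 *rx = mpc8xxx_spi->rx;

		*rx++ = (u8)(data >> mpc8xxx_spi->rx_shift);
		mpc8xxx_spi->rx = rx;
	}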
struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
{
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
void mpc8xxx_spi_work(struct work_struct *work)
{
struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
work);
spin_lock_irq(&mpc8xxx_spi->lock);
while (!list_empty(&mpc8xxx_spi->queue)) {
struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
struct spi_message, queue);
list_del_init(&m->queue);
spin_unlock_irq(&mpc8xxx_spi->lock);
if (mpc8xxx_spi->spi_do_one_msg)
mpc8xxx_spi->spi_do_one_msg(m);
spin_lock_irq(&mpc8xxx_spi->lock);
}
spin_unlock_irq(&mpc8xxx_spi->lock);
}
int mpc8xxx_spi_transfer(struct spi_device *spi,
struct spi_message *m)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
unsigned long flags;
m->actual_length = 0;
m->status = -EINPROGRESS;
spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
list_add_tail(&m->queue, &mpc8xxx_spi->queue);
queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
return 0;
}
void mpc8xxx_spi_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
const char *mpc8xxx_spi_strmode(unsigned int flags)
{
if (flags & SPI_QE_CPU_MODE) {
return "QE CPU";
} else if (flags & SPI_CPM_MODE) {
if (flags & SPI_QE)
return "QE";
else if (flags & SPI_CPM2)
return "CPM2";
else
return "CPM1";
}
return "CPU";
}
int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
int ret = 0;
master = dev_get_drvdata(dev);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
master->transfer = mpc8xxx_spi_transfer;
master->cleanup = mpc8xxx_spi_cleanup;
master->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->dev = dev;
mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
mpc8xxx_spi->flags = pdata->flags;
mpc8xxx_spi->spibrg = pdata->sysclk;
mpc8xxx_spi->irq = irq;
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
init_completion(&mpc8xxx_spi->done);
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
spin_lock_init(&mpc8xxx_spi->lock);
init_completion(&mpc8xxx_spi->done);
INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
INIT_LIST_HEAD(&mpc8xxx_spi->queue);
mpc8xxx_spi->workqueue = create_singlethread_workqueue(
dev_name(master->dev.parent));
if (mpc8xxx_spi->workqueue == NULL) {
ret = -EBUSY;
goto err;
}
return 0;
err:
return ret;
}
int __devexit mpc8xxx_spi_remove(struct device *dev)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct spi_master *master;
master = dev_get_drvdata(dev);
mpc8xxx_spi = spi_master_get_devdata(master);
flush_workqueue(mpc8xxx_spi->workqueue);
destroy_workqueue(mpc8xxx_spi->workqueue);
spi_unregister_master(master);
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
if (mpc8xxx_spi->spi_remove)
mpc8xxx_spi->spi_remove(mpc8xxx_spi);
return 0;
}
int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct mpc8xxx_spi_probe_info *pinfo;
struct fsl_spi_platform_data *pdata;
const void *prop;
int ret = -ENOMEM;
pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
pdata = &pinfo->pdata;
dev->platform_data = pdata;
/* Allocate bus num dynamically. */
pdata->bus_num = -1;
/* SPI controller is either clocked from QE or SoC clock. */
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
if (pdata->sysclk == -1) {
ret = -ENODEV;
goto err;
}
}
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
pdata->flags = SPI_QE_CPU_MODE;
else if (prop && !strcmp(prop, "qe"))
pdata->flags = SPI_CPM_MODE | SPI_QE;
else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM2;
else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
err:
kfree(pinfo);
return ret;
}
/*
* Freescale SPI/eSPI controller driver library.
*
* Maintainer: Kumar Gala
*
* Copyright 2010 Freescale Semiconductor, Inc.
* Copyright (C) 2006 Polycom, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __SPI_FSL_LIB_H__
#define __SPI_FSL_LIB_H__
#include <asm/io.h>
/* SPI/eSPI Controller driver's private data. */
struct mpc8xxx_spi {
struct device *dev;
void *reg_base;
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
#ifdef CONFIG_SPI_FSL_ESPI
int len;
#endif
int subblock;
struct spi_pram __iomem *pram;
struct cpm_buf_desc __iomem *tx_bd;
struct cpm_buf_desc __iomem *rx_bd;
struct spi_transfer *xfer_in_progress;
/* dma addresses for CPM transfers */
dma_addr_t tx_dma;
dma_addr_t rx_dma;
bool map_tx_dma;
bool map_rx_dma;
dma_addr_t dma_dummy_tx;
dma_addr_t dma_dummy_rx;
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32(*get_tx) (struct mpc8xxx_spi *);
/* hooks for different controller driver */
void (*spi_do_one_msg) (struct spi_message *m);
void (*spi_remove) (struct mpc8xxx_spi *mspi);
unsigned int count;
unsigned int irq;
unsigned nsecs; /* (clock cycle time)/2 */
u32 spibrg; /* SPIBRG input clock */
u32 rx_shift; /* RX data reg shift when in qe mode */
u32 tx_shift; /* TX data reg shift when in qe mode */
unsigned int flags;
struct workqueue_struct *workqueue;
struct work_struct work;
struct list_head queue;
spinlock_t lock;
struct completion done;
};
struct spi_mpc8xxx_cs {
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32 (*get_tx) (struct mpc8xxx_spi *);
u32 rx_shift; /* RX data reg shift when in qe mode */
u32 tx_shift; /* TX data reg shift when in qe mode */
u32 hw_mode; /* Holds HW mode register settings */
};
static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
out_be32(reg, val);
}
static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
return in_be32(reg);
}
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
int *gpios;
bool *alow_flags;
};
extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi);
extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi);
extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
struct fsl_spi_platform_data *pdata);
extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len);
extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
extern void mpc8xxx_spi_cleanup(struct spi_device *spi);
extern const char *mpc8xxx_spi_strmode(unsigned int flags);
extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq);
extern int mpc8xxx_spi_remove(struct device *dev);
extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev,
const struct of_device_id *ofid);
#endif /* __SPI_FSL_LIB_H__ */
/*
* MPC8xxx SPI controller driver.
* Freescale SPI controller driver.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
......@@ -15,18 +16,11 @@
* option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/platform_device.h>
......@@ -38,12 +32,12 @@
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
#include <asm/qe.h>
#include <asm/irq.h>
#include "spi_fsl_lib.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
......@@ -55,7 +49,7 @@
#endif
/* SPI Controller registers */
struct mpc8xxx_spi_reg {
struct fsl_spi_reg {
u8 res1[0x20];
__be32 mode;
__be32 event;
......@@ -102,112 +96,16 @@ struct mpc8xxx_spi_reg {
#define SPI_PRAM_SIZE 0x100
#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
/* SPI Controller driver's private data. */
struct mpc8xxx_spi {
struct device *dev;
struct mpc8xxx_spi_reg __iomem *base;
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
int subblock;
struct spi_pram __iomem *pram;
struct cpm_buf_desc __iomem *tx_bd;
struct cpm_buf_desc __iomem *rx_bd;
struct spi_transfer *xfer_in_progress;
/* dma addresses for CPM transfers */
dma_addr_t tx_dma;
dma_addr_t rx_dma;
bool map_tx_dma;
bool map_rx_dma;
dma_addr_t dma_dummy_tx;
dma_addr_t dma_dummy_rx;
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32(*get_tx) (struct mpc8xxx_spi *);
unsigned int count;
unsigned int irq;
unsigned nsecs; /* (clock cycle time)/2 */
u32 spibrg; /* SPIBRG input clock */
u32 rx_shift; /* RX data reg shift when in qe mode */
u32 tx_shift; /* TX data reg shift when in qe mode */
unsigned int flags;
struct workqueue_struct *workqueue;
struct work_struct work;
struct list_head queue;
spinlock_t lock;
struct completion done;
};
static void *mpc8xxx_dummy_rx;
static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
static int mpc8xxx_dummy_rx_refcnt;
struct spi_mpc8xxx_cs {
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32 (*get_tx) (struct mpc8xxx_spi *);
u32 rx_shift; /* RX data reg shift when in qe mode */
u32 tx_shift; /* TX data reg shift when in qe mode */
u32 hw_mode; /* Holds HW mode register settings */
};
static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
out_be32(reg, val);
}
static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
return in_be32(reg);
}
#define MPC83XX_SPI_RX_BUF(type) \
static \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
type *rx = mpc8xxx_spi->rx; \
*rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
mpc8xxx_spi->rx = rx; \
}
#define MPC83XX_SPI_TX_BUF(type) \
static \
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
u32 data; \
const type *tx = mpc8xxx_spi->tx; \
if (!tx) \
return 0; \
data = *tx++ << mpc8xxx_spi->tx_shift; \
mpc8xxx_spi->tx = tx; \
return data; \
}
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
MPC83XX_SPI_RX_BUF(u8)
MPC83XX_SPI_RX_BUF(u16)
MPC83XX_SPI_RX_BUF(u32)
MPC83XX_SPI_TX_BUF(u8)
MPC83XX_SPI_TX_BUF(u16)
MPC83XX_SPI_TX_BUF(u32)
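/*
 * Illustrative note (not part of the original driver): each
 * MPC83XX_SPI_RX_BUF()/MPC83XX_SPI_TX_BUF() instantiation above expands
 * into a type-sized buffer accessor. MPC83XX_SPI_RX_BUF(u16), for
 * example, is equivalent to:
 *
 *	static void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi)
 *	{
 *		u16 *rx = mpc8xxx_spi->rx;
 *
 *		*rx++ = (u16)(data >> mpc8xxx_spi->rx_shift);
 *		mpc8xxx_spi->rx = rx;
 *	}
 */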
static void mpc8xxx_spi_change_mode(struct spi_device *spi)
static void fsl_spi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
__be32 __iomem *mode = &mspi->base->mode;
struct fsl_spi_reg *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->mode;
unsigned long flags;
if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
......@@ -238,7 +136,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
local_irq_restore(flags);
}
static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
......@@ -256,15 +154,14 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
mpc8xxx_spi_change_mode(spi);
fsl_spi_change_mode(spi);
if (pdata->cs_control)
pdata->cs_control(spi, pol);
}
}
static int
mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
struct mpc8xxx_spi *mpc8xxx_spi,
int bits_per_word)
......@@ -307,8 +204,7 @@ mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
return bits_per_word;
}
static int
mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
int bits_per_word)
{
......@@ -326,13 +222,13 @@ mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
return bits_per_word;
}
static
int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static int fsl_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi;
int bits_per_word;
int bits_per_word = 0;
u8 pm;
u32 hz;
u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
mpc8xxx_spi = spi_master_get_devdata(spi->master);
......@@ -340,9 +236,6 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
} else {
bits_per_word = 0;
hz = 0;
}
/* spi_transfer level calls that work per-word */
......@@ -388,23 +281,25 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
hz, mpc8xxx_spi->spibrg / 1024);
if (pm > 16)
pm = 16;
} else
} else {
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
}
if (pm)
pm--;
cs->hw_mode |= SPMODE_PM(pm);
mpc8xxx_spi_change_mode(spi);
fsl_spi_change_mode(spi);
return 0;
}
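/*
 * Worked example (for illustration only): with spibrg = 100 MHz and a
 * requested hz of 5 MHz, pm = (100000000 - 1) / (5000000 * 4) + 1 = 5,
 * and the subsequent pm-- encodes 4 into the mode register via
 * SPMODE_PM(pm).
 */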
static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
unsigned int xfer_ofs;
struct fsl_spi_reg *reg_base = mspi->reg_base;
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
......@@ -424,13 +319,14 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
BD_SC_LAST);
/* start transfer */
mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}
static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, bool is_dma_mapped)
{
struct device *dev = mspi->dev;
struct fsl_spi_reg *reg_base = mspi->reg_base;
if (is_dma_mapped) {
mspi->map_tx_dma = 0;
......@@ -475,13 +371,13 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
}
/* enable rx ints */
mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);
mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
mspi->xfer_in_progress = t;
mspi->count = t->len;
/* start CPM transfers */
mpc8xxx_spi_cpm_bufs_start(mspi);
fsl_spi_cpm_bufs_start(mspi);
return 0;
......@@ -491,7 +387,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
return -ENOMEM;
}
static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct spi_transfer *t = mspi->xfer_in_progress;
......@@ -503,31 +399,34 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
mspi->xfer_in_progress = NULL;
}
static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
u32 word;
struct fsl_spi_reg *reg_base = mspi->reg_base;
mspi->count = len;
/* enable rx ints */
mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
/* transmit word */
word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
return 0;
}
static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
bool is_dma_mapped)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_spi_reg *reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
reg_base = mpc8xxx_spi->reg_base;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
......@@ -551,24 +450,24 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
INIT_COMPLETION(mpc8xxx_spi->done);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
else
ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
if (ret)
return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);
fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
return mpc8xxx_spi->count;
}
static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
struct spi_transfer *t;
......@@ -584,18 +483,18 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
status = -EINVAL;
if (cs_change)
status = mpc8xxx_spi_setup_transfer(spi, t);
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
break;
}
if (cs_change) {
mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE);
fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
ndelay(nsecs);
}
cs_change = t->cs_change;
if (t->len)
status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
if (status) {
status = -EMSGSIZE;
break;
......@@ -607,7 +506,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
if (cs_change) {
ndelay(nsecs);
mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
}
......@@ -617,35 +516,16 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
if (status || !cs_change) {
ndelay(nsecs);
mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
}
mpc8xxx_spi_setup_transfer(spi, NULL);
}
static void mpc8xxx_spi_work(struct work_struct *work)
{
struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
work);
spin_lock_irq(&mpc8xxx_spi->lock);
while (!list_empty(&mpc8xxx_spi->queue)) {
struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
struct spi_message, queue);
list_del_init(&m->queue);
spin_unlock_irq(&mpc8xxx_spi->lock);
mpc8xxx_spi_do_one_msg(m);
spin_lock_irq(&mpc8xxx_spi->lock);
}
spin_unlock_irq(&mpc8xxx_spi->lock);
fsl_spi_setup_transfer(spi, NULL);
}
static int mpc8xxx_spi_setup(struct spi_device *spi)
static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
......@@ -661,8 +541,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
reg_base = mpc8xxx_spi->reg_base;
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
| SPMODE_REV | SPMODE_LOOP);
......@@ -676,7 +558,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
if (spi->mode & SPI_LOOP)
cs->hw_mode |= SPMODE_LOOP;
retval = mpc8xxx_spi_setup_transfer(spi, NULL);
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
......@@ -684,9 +566,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
return 0;
}
static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
u16 len;
struct fsl_spi_reg *reg_base = mspi->reg_base;
dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
......@@ -698,20 +581,22 @@ static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
}
/* Clear the events */
mpc8xxx_spi_write_reg(&mspi->base->event, events);
mpc8xxx_spi_write_reg(&reg_base->event, events);
mspi->count -= len;
if (mspi->count)
mpc8xxx_spi_cpm_bufs_start(mspi);
fsl_spi_cpm_bufs_start(mspi);
else
complete(&mspi->done);
}
static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
struct fsl_spi_reg *reg_base = mspi->reg_base;
/* We need to handle RX first */
if (events & SPIE_NE) {
u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);
u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
if (mspi->rx)
mspi->get_rx(rx_data, mspi);
......@@ -720,102 +605,80 @@ static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
if ((events & SPIE_NF) == 0)
/* spin until TX is done */
while (((events =
mpc8xxx_spi_read_reg(&mspi->base->event)) &
mpc8xxx_spi_read_reg(&reg_base->event)) &
SPIE_NF) == 0)
cpu_relax();
/* Clear the events */
mpc8xxx_spi_write_reg(&mspi->base->event, events);
mpc8xxx_spi_write_reg(&reg_base->event, events);
mspi->count -= 1;
if (mspi->count) {
u32 word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
} else {
complete(&mspi->done);
}
}
static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
{
struct mpc8xxx_spi *mspi = context_data;
irqreturn_t ret = IRQ_NONE;
u32 events;
struct fsl_spi_reg *reg_base = mspi->reg_base;
/* Get interrupt events(tx/rx) */
events = mpc8xxx_spi_read_reg(&mspi->base->event);
events = mpc8xxx_spi_read_reg(&reg_base->event);
if (events)
ret = IRQ_HANDLED;
dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
if (mspi->flags & SPI_CPM_MODE)
mpc8xxx_spi_cpm_irq(mspi, events);
fsl_spi_cpm_irq(mspi, events);
else
mpc8xxx_spi_cpu_irq(mspi, events);
fsl_spi_cpu_irq(mspi, events);
return ret;
}
static int mpc8xxx_spi_transfer(struct spi_device *spi,
struct spi_message *m)
static void *fsl_spi_alloc_dummy_rx(void)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
unsigned long flags;
mutex_lock(&fsl_dummy_rx_lock);
m->actual_length = 0;
m->status = -EINPROGRESS;
if (!fsl_dummy_rx)
fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
if (fsl_dummy_rx)
fsl_dummy_rx_refcnt++;
spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
list_add_tail(&m->queue, &mpc8xxx_spi->queue);
queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
mutex_unlock(&fsl_dummy_rx_lock);
return 0;
return fsl_dummy_rx;
}
static void mpc8xxx_spi_cleanup(struct spi_device *spi)
static void fsl_spi_free_dummy_rx(void)
{
kfree(spi->controller_state);
}
mutex_lock(&fsl_dummy_rx_lock);
static void *mpc8xxx_spi_alloc_dummy_rx(void)
{
mutex_lock(&mpc8xxx_dummy_rx_lock);
if (!mpc8xxx_dummy_rx)
mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
if (mpc8xxx_dummy_rx)
mpc8xxx_dummy_rx_refcnt++;
mutex_unlock(&mpc8xxx_dummy_rx_lock);
return mpc8xxx_dummy_rx;
}
static void mpc8xxx_spi_free_dummy_rx(void)
{
mutex_lock(&mpc8xxx_dummy_rx_lock);
switch (mpc8xxx_dummy_rx_refcnt) {
switch (fsl_dummy_rx_refcnt) {
case 0:
WARN_ON(1);
break;
case 1:
kfree(mpc8xxx_dummy_rx);
mpc8xxx_dummy_rx = NULL;
kfree(fsl_dummy_rx);
fsl_dummy_rx = NULL;
/* fall through */
default:
mpc8xxx_dummy_rx_refcnt--;
fsl_dummy_rx_refcnt--;
break;
}
mutex_unlock(&mpc8xxx_dummy_rx_lock);
mutex_unlock(&fsl_dummy_rx_lock);
}
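/*
 * Illustrative note: the dummy RX buffer is shared by every controller
 * instance. fsl_spi_alloc_dummy_rx() allocates it on first use and takes
 * a reference; fsl_spi_free_dummy_rx() frees it only when the last
 * reference is dropped.
 */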
static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
......@@ -869,7 +732,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
return pram_ofs;
}
static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
......@@ -881,7 +744,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
if (!(mspi->flags & SPI_CPM_MODE))
return 0;
if (!mpc8xxx_spi_alloc_dummy_rx())
if (!fsl_spi_alloc_dummy_rx())
return -ENOMEM;
if (mspi->flags & SPI_QE) {
......@@ -902,7 +765,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
}
}
pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
pram_ofs = fsl_spi_cpm_get_pram(mspi);
if (IS_ERR_VALUE(pram_ofs)) {
dev_err(dev, "can't allocate spi parameter ram\n");
goto err_pram;
......@@ -922,7 +785,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
goto err_dummy_tx;
}
mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
dev_err(dev, "unable to map dummy rx buffer\n");
......@@ -960,11 +823,11 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
err_bds:
cpm_muram_free(pram_ofs);
err_pram:
mpc8xxx_spi_free_dummy_rx();
fsl_spi_free_dummy_rx();
return -ENOMEM;
}
static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
......@@ -972,30 +835,22 @@ static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
cpm_muram_free(cpm_muram_offset(mspi->pram));
mpc8xxx_spi_free_dummy_rx();
fsl_spi_free_dummy_rx();
}
static const char *mpc8xxx_spi_strmode(unsigned int flags)
static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
{
if (flags & SPI_QE_CPU_MODE) {
return "QE CPU";
} else if (flags & SPI_CPM_MODE) {
if (flags & SPI_QE)
return "QE";
else if (flags & SPI_CPM2)
return "CPM2";
else
return "CPM1";
}
return "CPU";
iounmap(mspi->reg_base);
fsl_spi_cpm_free(mspi);
}
static struct spi_master * __devinit
mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
u32 regval;
int ret = 0;
......@@ -1007,132 +862,77 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
dev_set_drvdata(dev, master);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
ret = mpc8xxx_spi_probe(dev, mem, irq);
if (ret)
goto err_probe;
master->setup = mpc8xxx_spi_setup;
master->transfer = mpc8xxx_spi_transfer;
master->cleanup = mpc8xxx_spi_cleanup;
master->dev.of_node = dev->of_node;
master->setup = fsl_spi_setup;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->dev = dev;
mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
mpc8xxx_spi->flags = pdata->flags;
mpc8xxx_spi->spibrg = pdata->sysclk;
mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_spi_remove;
ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
ret = fsl_spi_cpm_init(mpc8xxx_spi);
if (ret)
goto err_cpm_init;
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
mpc8xxx_spi->rx_shift = 16;
mpc8xxx_spi->tx_shift = 24;
}
init_completion(&mpc8xxx_spi->done);
mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->base == NULL) {
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->reg_base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
}
mpc8xxx_spi->irq = irq;
/* Register for SPI Interrupt */
ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq,
0, "mpc8xxx_spi", mpc8xxx_spi);
ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
0, "fsl_spi", mpc8xxx_spi);
if (ret != 0)
goto unmap_io;
goto free_irq;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
reg_base = mpc8xxx_spi->reg_base;
/* SPI controller initializations */
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0);
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0);
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff);
mpc8xxx_spi_write_reg(&reg_base->mode, 0);
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
mpc8xxx_spi_write_reg(&reg_base->command, 0);
mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
spin_lock_init(&mpc8xxx_spi->lock);
init_completion(&mpc8xxx_spi->done);
INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
INIT_LIST_HEAD(&mpc8xxx_spi->queue);
mpc8xxx_spi->workqueue = create_singlethread_workqueue(
dev_name(master->dev.parent));
if (mpc8xxx_spi->workqueue == NULL) {
ret = -EBUSY;
goto free_irq;
}
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
ret = spi_register_master(master);
if (ret < 0)
goto unreg_master;
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return master;
unreg_master:
destroy_workqueue(mpc8xxx_spi->workqueue);
free_irq:
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
unmap_io:
iounmap(mpc8xxx_spi->base);
free_irq:
iounmap(mpc8xxx_spi->reg_base);
err_ioremap:
mpc8xxx_spi_cpm_free(mpc8xxx_spi);
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
err_probe:
spi_master_put(master);
err:
return ERR_PTR(ret);
}
static int __devexit mpc8xxx_spi_remove(struct device *dev)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct spi_master *master;
master = dev_get_drvdata(dev);
mpc8xxx_spi = spi_master_get_devdata(master);
flush_workqueue(mpc8xxx_spi->workqueue);
destroy_workqueue(mpc8xxx_spi->workqueue);
spi_unregister_master(master);
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
iounmap(mpc8xxx_spi->base);
mpc8xxx_spi_cpm_free(mpc8xxx_spi);
return 0;
}
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
int *gpios;
bool *alow_flags;
};
static struct mpc8xxx_spi_probe_info *
to_of_pinfo(struct fsl_spi_platform_data *pdata)
{
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
struct device *dev = spi->dev.parent;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
......@@ -1143,7 +943,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
gpio_set_value(gpio, on ^ alow);
}
static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
static int of_fsl_spi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev->platform_data;
......@@ -1204,7 +1004,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
}
pdata->max_chipselect = ngpios;
pdata->cs_control = mpc8xxx_spi_cs_control;
pdata->cs_control = fsl_spi_cs_control;
return 0;
......@@ -1223,7 +1023,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
return ret;
}
static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
static int of_fsl_spi_free_chipselects(struct device *dev)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
......@@ -1242,50 +1042,21 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
return 0;
}
static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
static int __devinit of_fsl_spi_probe(struct platform_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct mpc8xxx_spi_probe_info *pinfo;
struct fsl_spi_platform_data *pdata;
struct spi_master *master;
struct resource mem;
struct resource irq;
const void *prop;
int ret = -ENOMEM;
pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
pdata = &pinfo->pdata;
dev->platform_data = pdata;
/* Allocate bus num dynamically. */
pdata->bus_num = -1;
/* SPI controller is either clocked from QE or SoC clock. */
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
if (pdata->sysclk == -1) {
ret = -ENODEV;
goto err_clk;
}
}
ret = of_mpc8xxx_spi_probe(ofdev, ofid);
if (ret)
return ret;
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
pdata->flags = SPI_QE_CPU_MODE;
else if (prop && !strcmp(prop, "qe"))
pdata->flags = SPI_CPM_MODE | SPI_QE;
else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM2;
else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
ret = of_mpc8xxx_spi_get_chipselects(dev);
ret = of_fsl_spi_get_chipselects(dev);
if (ret)
goto err;
......@@ -1299,7 +1070,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
goto err;
}
master = mpc8xxx_spi_probe(dev, &mem, irq.start);
master = fsl_spi_probe(dev, &mem, irq.start);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
......@@ -1308,37 +1079,35 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev,
return 0;
err:
of_mpc8xxx_spi_free_chipselects(dev);
err_clk:
kfree(pinfo);
of_fsl_spi_free_chipselects(dev);
return ret;
}
static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev)
static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
{
int ret;
ret = mpc8xxx_spi_remove(&ofdev->dev);
if (ret)
return ret;
of_mpc8xxx_spi_free_chipselects(&ofdev->dev);
of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
}
static const struct of_device_id of_mpc8xxx_spi_match[] = {
static const struct of_device_id of_fsl_spi_match[] = {
{ .compatible = "fsl,spi" },
{},
{}
};
MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);
MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
static struct of_platform_driver of_mpc8xxx_spi_driver = {
static struct of_platform_driver of_fsl_spi_driver = {
.driver = {
.name = "mpc8xxx_spi",
.name = "fsl_spi",
.owner = THIS_MODULE,
.of_match_table = of_mpc8xxx_spi_match,
.of_match_table = of_fsl_spi_match,
},
.probe = of_mpc8xxx_spi_probe,
.remove = __devexit_p(of_mpc8xxx_spi_remove),
.probe = of_fsl_spi_probe,
.remove = __devexit_p(of_fsl_spi_remove),
};
#ifdef CONFIG_MPC832x_RDB
......@@ -1366,7 +1135,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
if (irq <= 0)
return -EINVAL;
master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
master = fsl_spi_probe(&pdev->dev, mem, irq);
if (IS_ERR(master))
return PTR_ERR(master);
return 0;
......@@ -1405,21 +1174,20 @@ static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */
static int __init mpc8xxx_spi_init(void)
static int __init fsl_spi_init(void)
{
legacy_driver_register();
return of_register_platform_driver(&of_mpc8xxx_spi_driver);
return of_register_platform_driver(&of_fsl_spi_driver);
}
module_init(fsl_spi_init);
static void __exit mpc8xxx_spi_exit(void)
static void __exit fsl_spi_exit(void)
{
of_unregister_platform_driver(&of_mpc8xxx_spi_driver);
of_unregister_platform_driver(&of_fsl_spi_driver);
legacy_driver_unregister();
}
module_init(mpc8xxx_spi_init);
module_exit(mpc8xxx_spi_exit);
module_exit(fsl_spi_exit);
MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver");
MODULE_DESCRIPTION("Simple Freescale SPI Driver");
MODULE_LICENSE("GPL");
......@@ -261,15 +261,25 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
s3c2410_dma_config(sdd->tx_dmach, 1);
s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
xfer->tx_dma, xfer->len);
s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
} else {
unsigned char *buf = (unsigned char *) xfer->tx_buf;
int i = 0;
while (i < xfer->len)
writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
switch (sdd->cur_bpw) {
case 32:
iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 4);
break;
case 16:
iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 2);
break;
default:
iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len);
break;
}
}
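/*
 * Illustrative note: iowrite8_rep()/iowrite16_rep()/iowrite32_rep() each
 * push xfer->len bytes' worth of data into the single TX data register,
 * with the access width chosen from sdd->cur_bpw rather than always
 * issuing byte writes.
 */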
}
......@@ -286,7 +296,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
s3c2410_dma_config(sdd->rx_dmach, 1);
s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
xfer->rx_dma, xfer->len);
s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
......@@ -366,20 +376,26 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
return -EIO;
}
} else {
unsigned char *buf;
int i;
/* If it was only Tx */
if (xfer->rx_buf == NULL) {
sdd->state &= ~TXBUSY;
return 0;
}
i = 0;
buf = xfer->rx_buf;
while (i < xfer->len)
buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len);
break;
}
sdd->state &= ~RXBUSY;
}
......@@ -399,13 +415,18 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 val;
/* Disable Clock */
if (sci->clk_from_cmu) {
clk_disable(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
/* Set Polarity and Phase */
val = readl(regs + S3C64XX_SPI_CH_CFG);
......@@ -429,18 +450,27 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
switch (sdd->cur_bpw) {
case 32:
val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
break;
case 16:
val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
break;
default:
val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
break;
}
val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
writel(val, regs + S3C64XX_SPI_MODE_CFG);
if (sci->clk_from_cmu) {
/* Configure Clock */
/* There is a half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
/* Enable Clock */
clk_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
......@@ -452,6 +482,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
}
static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
......@@ -499,6 +530,7 @@ static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
......@@ -514,6 +546,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
/* Map until end or first fail */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
continue;
if (xfer->tx_buf != NULL) {
xfer->tx_dma = dma_map_single(dev,
(void *)xfer->tx_buf, xfer->len,
......@@ -545,6 +580,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
......@@ -553,6 +589,9 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
continue;
if (xfer->rx_buf != NULL
&& xfer->rx_dma != XFER_DMAADDR_INVALID)
dma_unmap_single(dev, xfer->rx_dma,
......@@ -608,6 +647,14 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
bpw = xfer->bits_per_word ? : spi->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
if (xfer->len % (bpw / 8)) {
dev_err(&spi->dev,
"Xfer length(%u) not a multiple of word size(%u)\n",
xfer->len, bpw / 8);
status = -EIO;
goto out;
}
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
......@@ -798,7 +845,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
struct spi_message *msg;
u32 psr, speed;
unsigned long flags;
int err = 0;
......@@ -841,7 +887,11 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
/* Check if we can provide the requested rate */
speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */
if (!sci->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
......@@ -866,6 +916,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spi->max_speed_hz = speed;
else
err = -EINVAL;
}
setup_exit:
......@@ -888,6 +939,7 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
if (!sci->clk_from_cmu)
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
......
/*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
* Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
#define PCH_SPBRR 0x04 /* SPI baud rate register */
#define PCH_SPSR 0x08 /* SPI status register */
#define PCH_SPDWR 0x0C /* SPI write data register */
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800
#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
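/*
 * Illustrative note: PCH_READABLE() and PCH_WRITABLE() extract the RFD
 * and TFD fields from the status register; the interrupt handler below
 * uses PCH_READABLE() as the number of words it may drain from the RX
 * FIFO in one pass (the FIFOs are PCH_MAX_FIFO_DEPTH entries deep).
 */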
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15
#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16
#define STATUS_RUNNING 1
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10
#define PCH_ADDRESS_SIZE 0x20
#define SSN_LOW 0x02U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816
#define SPCR_SPE_BIT (1 << 0)
#define SPCR_MSTR_BIT (1 << 1)
#define SPCR_LSBF_BIT (1 << 4)
#define SPCR_CPHA_BIT (1 << 5)
#define SPCR_CPOL_BIT (1 << 6)
#define SPCR_TFIE_BIT (1 << 8)
#define SPCR_RFIE_BIT (1 << 9)
#define SPCR_FIE_BIT (1 << 10)
#define SPCR_ORIE_BIT (1 << 11)
#define SPCR_MDFIE_BIT (1 << 12)
#define SPCR_FICLR_BIT (1 << 24)
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
#define SPBRR_SIZE_BIT (1 << 10)
#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16
#define SPSR_INT_BITS 0x1F
#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1))
#define MASK_RFIC_SPCR_BITS (~(0xf << 20))
#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12))
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
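/*
 * Illustrative note: MASK_SPBRR_SPBR_BITS is ~((1 << 10) - 1) = ~0x3ff,
 * i.e. everything above the 10-bit SPBR field, so PCH_MAX_SPBR (1023 =
 * 0x3ff) is the largest divisor that fits in that field.
 */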
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
* @master: Pointer to the SPI master structure
* @work: Reference to work queue handler
* @wk: Workqueue for carrying out execution of the
* requests
* @wait: Wait queue for waking up upon receiving an
* interrupt.
* @transfer_complete: Status of SPI Transfer
* @bcurrent_msg_processing: Status flag for message processing
* @lock: Lock for protecting this structure
* @queue: SPI Message queue
* @status: Status of the SPI driver
* @bpw_len: Length of data to be transferred, counted in
* bits-per-word sized words
* @transfer_active: Flag showing active transfer
* @tx_index: Transmit data count; for bookkeeping during
* transfer
* @rx_index: Receive data count; for bookkeeping during
* transfer
* @pkt_tx_buff: Buffer for data to be transmitted
* @pkt_rx_buff: Buffer for received data
* @n_curnt_chip: The chip number that this SPI driver currently
* operates on
* @current_chip: Reference to the current chip that this SPI
* driver currently operates on
* @current_msg: The current message that this SPI driver is
* handling
* @cur_trans: The current transfer that this SPI driver is
* handling
* @board_dat: Reference to the SPI device data structure
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
struct spi_master *master;
struct work_struct work;
struct workqueue_struct *wk;
wait_queue_head_t wait;
u8 transfer_complete;
u8 bcurrent_msg_processing;
spinlock_t lock;
struct list_head queue;
u8 status;
u32 bpw_len;
u8 transfer_active;
u32 tx_index;
u32 rx_index;
u16 *pkt_tx_buff;
u16 *pkt_rx_buff;
u8 n_curnt_chip;
struct spi_device *current_chip;
struct spi_message *current_msg;
struct spi_transfer *cur_trans;
struct pch_spi_board_data *board_dat;
};
/**
* struct pch_spi_board_data - Holds the SPI device specific details
* @pdev: Pointer to the PCI device
* @irq_reg_sts: Status of IRQ registration
* @pci_req_sts: Status of pci_request_regions
* @suspend_sts: Status of suspend
* @data: Pointer to SPI channel data structure
*/
struct pch_spi_board_data {
struct pci_dev *pdev;
u8 irq_reg_sts;
u8 pci_req_sts;
u8 suspend_sts;
struct pch_spi_data *data;
};
static struct pci_device_id pch_spi_pcidev_id[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)},
{0,}
};
/**
* pch_spi_writereg() - Performs register writes
* @master: Pointer to struct spi_master.
* @idx: Register offset.
* @val: Value to be written to register.
*/
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
iowrite32(val, (data->io_remap_addr + idx));
}
/**
* pch_spi_readreg() - Performs register reads
* @master: Pointer to struct spi_master.
* @idx: Register offset.
*/
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
return ioread32(data->io_remap_addr + idx);
}
static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
u32 set, u32 clr)
{
u32 tmp = pch_spi_readreg(master, idx);
tmp = (tmp & ~clr) | set;
pch_spi_writereg(master, idx, tmp);
}
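/*
 * Illustrative usage: pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0)
 * performs a read-modify-write that sets the MSTR bit while leaving all
 * other bits untouched, as pch_spi_set_master_mode() below does; bits
 * passed in @clr are cleared before @set is applied.
 */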
static void pch_spi_set_master_mode(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}
/**
* pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
* @master: Pointer to struct spi_master.
*/
static void pch_spi_clear_fifo(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
void __iomem *io_remap_addr)
{
u32 n_read, tx_index, rx_index, bpw_len;
u16 *pkt_rx_buffer, *pkt_tx_buff;
int read_cnt;
u32 reg_spcr_val;
void __iomem *spsr;
void __iomem *spdrr;
void __iomem *spdwr;
spsr = io_remap_addr + PCH_SPSR;
iowrite32(reg_spsr_val, spsr);
if (data->transfer_active) {
rx_index = data->rx_index;
tx_index = data->tx_index;
bpw_len = data->bpw_len;
pkt_rx_buffer = data->pkt_rx_buff;
pkt_tx_buff = data->pkt_tx_buff;
spdrr = io_remap_addr + PCH_SPDRR;
spdwr = io_remap_addr + PCH_SPDWR;
n_read = PCH_READABLE(reg_spsr_val);
for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
pkt_rx_buffer[rx_index++] = ioread32(spdrr);
if (tx_index < bpw_len)
iowrite32(pkt_tx_buff[tx_index++], spdwr);
}
/* disable RFI if not needed */
if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
/* reset rx threshold */
reg_spcr_val &= MASK_RFIC_SPCR_BITS;
reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))),
(io_remap_addr + PCH_SPCR));
}
/* update counts */
data->tx_index = tx_index;
data->rx_index = rx_index;
}
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
/* disable FI and TFI interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
SPCR_FIE_BIT | SPCR_TFIE_BIT);
/* transfer is completed; inform pch_spi_process_messages */
data->transfer_complete = true;
wake_up(&data->wait);
}
}
/**
* pch_spi_handler() - Interrupt handler
* @irq: The interrupt number.
* @dev_id: Pointer to struct pch_spi_board_data.
*/
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
u32 reg_spsr_val;
struct pch_spi_data *data;
void __iomem *spsr;
void __iomem *io_remap_addr;
irqreturn_t ret = IRQ_NONE;
struct pch_spi_board_data *board_dat = dev_id;
if (board_dat->suspend_sts) {
dev_dbg(&board_dat->pdev->dev,
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
data = board_dat->data;
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
ret = IRQ_HANDLED;
}
dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
__func__, ret);
return ret;
}
/**
* pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
* @master: Pointer to struct spi_master.
* @speed_hz: Baud rate.
*/
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
/* if the requested baud rate is lower than we can support, clamp the divisor */
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS);
}
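/*
 * Worked example (for illustration only): with PCH_CLOCK_HZ = 50 MHz and
 * a requested speed_hz of 1 MHz, n_spbr = 50000000 / (1000000 * 2) = 25,
 * which is then written into the SPBR field of SPBRR.
 */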
/**
* pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
* @master: Pointer to struct spi_master.
* @bits_per_word: Bits per word for SPI transfer.
*/
static void pch_spi_set_bits_per_word(struct spi_master *master,
u8 bits_per_word)
{
if (bits_per_word == 8)
pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
else
pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
* pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
* @spi: Pointer to struct spi_device.
*/
static void pch_spi_setup_transfer(struct spi_device *spi)
{
u32 flags = 0;
dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
spi->max_speed_hz);
pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
/* set bits per word */
pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
if (!(spi->mode & SPI_LSB_FIRST))
flags |= SPCR_LSBF_BIT;
if (spi->mode & SPI_CPOL)
flags |= SPCR_CPOL_BIT;
if (spi->mode & SPI_CPHA)
flags |= SPCR_CPHA_BIT;
pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
(SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
pch_spi_clear_fifo(spi->master);
}
/**
* pch_spi_reset() - Clears SPI registers
* @master: Pointer to struct spi_master.
*/
static void pch_spi_reset(struct spi_master *master)
{
/* write 1 to reset SPI */
pch_spi_writereg(master, PCH_SRST, 0x1);
/* clear reset */
pch_spi_writereg(master, PCH_SRST, 0x0);
}
static int pch_spi_setup(struct spi_device *pspi)
{
/* check bits per word */
if (pspi->bits_per_word == 0) {
pspi->bits_per_word = 8;
dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
}
if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
return -EINVAL;
}
/* Check baud rate setting */
/* if the requested baud rate is greater than
   the max we can support, clamp it */
if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
pspi->max_speed_hz = PCH_MAX_BAUDRATE;
dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
(pspi->mode) & (SPI_CPOL | SPI_CPHA));
return 0;
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
struct spi_transfer *transfer;
struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
int retval;
unsigned long flags;
/* validate spi message and baud rate */
if (unlikely(list_empty(&pmsg->transfers) == 1)) {
dev_err(&pspi->dev, "%s list empty\n", __func__);
retval = -EINVAL;
goto err_out;
}
if (unlikely(pspi->max_speed_hz == 0)) {
dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n",
__func__, pspi->max_speed_hz);
retval = -EINVAL;
goto err_out;
}
dev_dbg(&pspi->dev, "%s Transfer List not empty. "
"Transfer Speed is set.\n", __func__);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
if (!transfer->tx_buf && !transfer->rx_buf) {
dev_err(&pspi->dev,
"%s Tx and Rx buffer NULL\n", __func__);
retval = -EINVAL;
goto err_out;
}
if (!transfer->len) {
dev_err(&pspi->dev, "%s Transfer length invalid\n",
__func__);
retval = -EINVAL;
goto err_out;
}
dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
" valid\n", __func__);
/* if a baud rate has been specified, validate it */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
transfer->speed_hz = PCH_MAX_BAUDRATE;
/* if bits per word has been specified, validate it */
if (transfer->bits_per_word) {
if ((transfer->bits_per_word != 8)
&& (transfer->bits_per_word != 16)) {
retval = -EINVAL;
dev_err(&pspi->dev,
"%s Invalid bits per word\n", __func__);
goto err_out;
}
}
}
spin_lock_irqsave(&data->lock, flags);
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
retval = -ESHUTDOWN;
goto err_return_spinlock;
}
/* If suspended, return -EINVAL */
if (data->board_dat->suspend_sts) {
dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
retval = -EINVAL;
goto err_return_spinlock;
}
/* set status of message */
pmsg->actual_length = 0;
dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
pmsg->status = -EINPROGRESS;
/* add message to queue */
list_add_tail(&pmsg->queue, &data->queue);
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
/* schedule work queue to run */
queue_work(data->wk, &data->work);
dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
retval = 0;
err_return_spinlock:
spin_unlock_irqrestore(&data->lock, flags);
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
struct spi_device *pspi)
{
if (data->current_chip != NULL) {
if (pspi->chip_select != data->n_curnt_chip) {
dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
data->current_chip = NULL;
}
}
data->current_chip = pspi;
data->n_curnt_chip = data->current_chip->chip_select;
dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
pch_spi_setup_transfer(pspi);
}
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
struct spi_message **ppmsg)
{
int size;
u32 n_writes;
int j;
struct spi_message *pmsg;
const u8 *tx_buf;
const u16 *tx_sbuf;
pmsg = *ppmsg;
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
pch_spi_set_bits_per_word(data->master,
data->cur_trans->bits_per_word);
*bpw = data->cur_trans->bits_per_word;
} else {
*bpw = data->current_msg->spi->bits_per_word;
}
/* reset Tx/Rx index */
data->tx_index = 0;
data->rx_index = 0;
data->bpw_len = data->cur_trans->len / (*bpw / 8);
/* find alloc size */
size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
if (!data->pkt_rx_buff)
kfree(data->pkt_tx_buff);
}
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -ENOMEM;
if (pmsg->complete != 0)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
return;
}
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
tx_buf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_buf++;
} else {
tx_sbuf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_sbuf++;
}
}
/* if bpw_len is greater than PCH_MAX_FIFO_DEPTH, write 16 words, else bpw_len words */
n_writes = data->bpw_len;
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
"0x2 to SSNXCR\n", __func__);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
/* update tx_index */
data->tx_index = j;
/* reset transfer complete flag */
data->transfer_complete = false;
data->transfer_active = true;
}
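/*
 * Illustrative note: pch_spi_set_tx() primes the TX FIFO with at most
 * PCH_MAX_FIFO_DEPTH words and drives SSN low; pch_spi_handler_sub()
 * then refills the FIFO from pkt_tx_buff as the hardware drains it.
 */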
static void pch_spi_nomore_transfer(struct pch_spi_data *data,
struct spi_message *pmsg)
{
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke the complete callback
 * to tell the SPI core that this transfer has ended */
data->current_msg->status = 0;
if (data->current_msg->complete != 0) {
dev_dbg(&data->master->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
}
/* update status in global variable */
data->bcurrent_msg_processing = false;
dev_dbg(&data->master->dev,
"%s:data->bcurrent_msg_processing = false\n", __func__);
data->current_msg = NULL;
data->cur_trans = NULL;
/* Check whether more messages are queued and we are neither
 * suspending nor exiting (list_empty() returns 1 when empty). */
if ((list_empty(&data->queue) == 0) &&
(!data->board_dat->suspend_sts) &&
(data->status != STATUS_EXITING)) {
/* We have some more work to do (either there are more transfer
 * requests in the current message or there are more messages).
 */
dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
queue_work(data->wk, &data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
}
}
static void pch_spi_set_ir(struct pch_spi_data *data)
{
/* enable interrupts */
if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
/* set receive threshold to PCH_RX_THOLD */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD << SPCR_TFIC_FIELD,
~MASK_TFIC_SPCR_BITS);
/* enable RFI and TFI interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR,
SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0);
} else {
/* set receive threshold to maximum */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
~MASK_TFIC_SPCR_BITS);
/* enable FI interrupt */
pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
}
dev_dbg(&data->master->dev,
"%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
/* SPI set enable */
pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
wait_event_interruptible(data->wait, data->transfer_complete);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
dev_dbg(&data->master->dev,
"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
data->transfer_active = false;
dev_dbg(&data->master->dev,
"%s set data->transfer_active = false\n", __func__);
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
/* disable interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
int j;
u8 *rx_buf;
u16 *rx_sbuf;
/* copy Rx Data */
if (!data->cur_trans->rx_buf)
return;
if (bpw == 8) {
rx_buf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
} else {
rx_sbuf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = data->pkt_rx_buff[j];
}
}
static void pch_spi_process_messages(struct work_struct *pwork)
{
struct spi_message *pmsg;
struct pch_spi_data *data;
int bpw;
data = container_of(pwork, struct pch_spi_data, work);
dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
/* check if suspend has been initiated; if yes, flush the queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
dev_dbg(&data->master->dev,
"%s suspend/remove initiated,flushing queue\n",
__func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete != 0) {
spin_unlock(&data->lock);
pmsg->complete(pmsg->context);
spin_lock(&data->lock);
}
/* delete from queue */
list_del_init(&pmsg->queue);
}
spin_unlock(&data->lock);
return;
}
data->bcurrent_msg_processing = true;
dev_dbg(&data->master->dev,
"%s Set data->bcurrent_msg_processing= true\n", __func__);
/* Get the message from the queue and delete it from there. */
data->current_msg = list_entry(data->queue.next, struct spi_message,
queue);
list_del_init(&data->current_msg->queue);
data->current_msg->status = 0;
pch_spi_select_chip(data, data->current_msg->spi);
spin_unlock(&data->lock);
do {
/* If we are already processing a message, get the next
transfer structure from it; otherwise retrieve the first
transfer request from the message. */
spin_lock(&data->lock);
if (data->cur_trans == NULL) {
data->cur_trans =
list_entry(data->current_msg->transfers.
next, struct spi_transfer,
transfer_list);
dev_dbg(&data->master->dev,
"%s :Getting 1st transfer message\n", __func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer,
transfer_list);
dev_dbg(&data->master->dev,
"%s :Getting next transfer message\n",
__func__);
}
spin_unlock(&data->lock);
pch_spi_set_tx(data, &bpw, &pmsg);
/* enable interrupts and wait for the transfer to complete */
pch_spi_set_ir(data);
/* Disable SPI transfer */
pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0,
SPCR_SPE_BIT);
/* clear FIFO */
pch_spi_clear_fifo(data->master);
/* copy Rx Data */
pch_spi_copy_rx_data(data, bpw);
/* free memory */
kfree(data->pkt_rx_buff);
data->pkt_rx_buff = NULL;
kfree(data->pkt_tx_buff);
data->pkt_tx_buff = NULL;
/* increment message count */
data->current_msg->actual_length += data->cur_trans->len;
dev_dbg(&data->master->dev,
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
/* check for delay */
if (data->cur_trans->delay_usecs) {
dev_dbg(&data->master->dev, "%s:"
"delay in usec=%d\n", __func__,
data->cur_trans->delay_usecs);
udelay(data->cur_trans->delay_usecs);
}
spin_lock(&data->lock);
/* No more transfer in this message. */
if ((data->cur_trans->transfer_list.next) ==
&(data->current_msg->transfers)) {
pch_spi_nomore_transfer(data, pmsg);
}
spin_unlock(&data->lock);
} while (data->cur_trans != NULL);
}
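/*
 * pch_spi_free_resources() - undo pch_spi_get_resources(): destroy the
 * workqueue, disable interrupts and free the IRQ, unmap the MMIO area
 * and release the PCI regions.
 */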
static void pch_spi_free_resources(struct pch_spi_board_data *board_dat)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* free workqueue */
if (board_dat->data->wk != NULL) {
destroy_workqueue(board_dat->data->wk);
board_dat->data->wk = NULL;
dev_dbg(&board_dat->pdev->dev,
"%s destroy_workqueue invoked successfully\n",
__func__);
}
/* disable interrupts & free IRQ */
if (board_dat->irq_reg_sts) {
/* disable interrupts */
pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
PCH_ALL);
/* free IRQ */
free_irq(board_dat->pdev->irq, board_dat);
dev_dbg(&board_dat->pdev->dev,
"%s free_irq invoked successfully\n", __func__);
board_dat->irq_reg_sts = false;
}
/* unmap PCI base address */
if (board_dat->data->io_remap_addr) {
pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
board_dat->data->io_remap_addr = NULL;
dev_dbg(&board_dat->pdev->dev,
"%s pci_iounmap invoked successfully\n", __func__);
}
/* release PCI region */
if (board_dat->pci_req_sts) {
pci_release_regions(board_dat->pdev);
dev_dbg(&board_dat->pdev->dev,
"%s pci_release_regions invoked successfully\n",
__func__);
board_dat->pci_req_sts = false;
}
}
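/*
 * pch_spi_get_resources() - claim the workqueue, PCI regions, MMIO
 * mapping and IRQ, and reset the controller; partially acquired
 * resources are freed again on failure.
 */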
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat)
{
void __iomem *io_remap_addr;
int retval;
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* create workqueue */
board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
if (!board_dat->data->wk) {
dev_err(&board_dat->pdev->dev,
"%s create_singlet hread_workqueue failed\n", __func__);
retval = -EBUSY;
goto err_return;
}
dev_dbg(&board_dat->pdev->dev,
"%s create_singlethread_workqueue success\n", __func__);
retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s request_region failed\n", __func__);
goto err_return;
}
board_dat->pci_req_sts = true;
io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!io_remap_addr) {
dev_err(&board_dat->pdev->dev,
"%s pci_iomap failed\n", __func__);
retval = -ENOMEM;
goto err_return;
}
/* calculate base address for all channels */
board_dat->data->io_remap_addr = io_remap_addr;
/* reset PCH SPI h/w */
pch_spi_reset(board_dat->data->master);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
/* register IRQ */
retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME, board_dat);
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s request_irq failed\n", __func__);
goto err_return;
}
dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
__func__, retval);
board_dat->irq_reg_sts = true;
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
err_return:
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s FAIL:invoking pch_spi_free_resources\n", __func__);
pch_spi_free_resources(board_dat);
}
dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
return retval;
}
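/*
 * pch_spi_probe() - PCI probe: allocate the private data and an SPI
 * master, claim the hardware resources and register the controller
 * with the SPI core.
 */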
static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct spi_master *master;
struct pch_spi_board_data *board_dat;
int retval;
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
/* allocate memory for private data */
board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
if (board_dat == NULL) {
dev_err(&pdev->dev,
" %s memory allocation for private data failed\n",
__func__);
retval = -ENOMEM;
goto err_kmalloc;
}
dev_dbg(&pdev->dev,
"%s memory allocation for private data success\n", __func__);
/* enable PCI device */
retval = pci_enable_device(pdev);
if (retval != 0) {
dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
goto err_pci_en_device;
}
dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
__func__, retval);
board_dat->pdev = pdev;
/* allocate memory for SPI master */
master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
if (master == NULL) {
retval = -ENOMEM;
dev_err(&pdev->dev, "%s Fail.\n", __func__);
goto err_spi_alloc_master;
}
dev_dbg(&pdev->dev,
"%s spi_alloc_master returned non NULL\n", __func__);
/* initialize members of SPI master */
master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
dev_dbg(&pdev->dev,
"%s transfer member of SPI master initialized\n", __func__);
board_dat->data = spi_master_get_devdata(master);
board_dat->data->master = master;
board_dat->data->n_curnt_chip = 255;
board_dat->data->board_dat = board_dat;
board_dat->data->status = STATUS_RUNNING;
INIT_LIST_HEAD(&board_dat->data->queue);
spin_lock_init(&board_dat->data->lock);
INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
init_waitqueue_head(&board_dat->data->wait);
/* allocate resources for PCH SPI */
retval = pch_spi_get_resources(board_dat);
if (retval) {
dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
goto err_spi_get_resources;
}
dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
__func__, retval);
/* save private data in dev */
pci_set_drvdata(pdev, board_dat);
dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
/* set master mode */
pch_spi_set_master_mode(master);
dev_dbg(&pdev->dev,
"%s invoked pch_spi_set_master_mode\n", __func__);
/* Register the controller with the SPI core. */
retval = spi_register_master(master);
if (retval != 0) {
dev_err(&pdev->dev,
"%s spi_register_master FAILED\n", __func__);
goto err_spi_reg_master;
}
dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
__func__, retval);
return 0;
err_spi_reg_master:
pch_spi_free_resources(board_dat);
err_spi_get_resources:
err_spi_alloc_master:
spi_master_put(master);
pci_disable_device(pdev);
err_pci_en_device:
kfree(board_dat);
err_kmalloc:
return retval;
}
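/*
 * pch_spi_remove() - wait (bounded) for the message queue to drain,
 * free the hardware resources, unregister the master and disable the
 * PCI device.
 */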
static void pch_spi_remove(struct pci_dev *pdev)
{
struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
int count;
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
dev_err(&pdev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return;
}
/* check for any pending messages; no action is taken if the queue
 * is still not empty, but at least we tried. Unload anyway */
count = 500;
spin_lock(&board_dat->data->lock);
board_dat->data->status = STATUS_EXITING;
while (!list_empty(&board_dat->data->queue) && --count) {
dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
__func__);
spin_unlock(&board_dat->data->lock);
msleep(PCH_SLEEP_TIME);
spin_lock(&board_dat->data->lock);
}
spin_unlock(&board_dat->data->lock);
/* Free resources allocated for PCH SPI */
pch_spi_free_resources(board_dat);
spi_unregister_master(board_dat->data->master);
/* free memory for private data */
kfree(board_dat);
pci_set_drvdata(pdev, NULL);
/* disable PCI device */
pci_disable_device(pdev);
dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
}
#ifdef CONFIG_PM
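/*
 * pch_spi_suspend() - wait for the in-flight message to finish, quiesce
 * and free the IRQ, then save PCI state and put the device into D3hot.
 */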
static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
u8 count;
int retval;
struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
dev_err(&pdev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
retval = 0;
board_dat->suspend_sts = true;
/* wait until the current message has been processed:
   only after that is done will the transfer be suspended */
count = 255;
while ((--count) > 0) {
if (!(board_dat->data->bcurrent_msg_processing)) {
dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
"msg_processing = false\n", __func__);
break;
} else {
dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
"processing = true\n", __func__);
}
msleep(PCH_SLEEP_TIME);
}
/* Free IRQ */
if (board_dat->irq_reg_sts) {
/* disable all interrupts */
pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
PCH_ALL);
pch_spi_reset(board_dat->data->master);
free_irq(board_dat->pdev->irq, board_dat);
board_dat->irq_reg_sts = false;
dev_dbg(&pdev->dev,
"%s free_irq invoked successfully.\n", __func__);
}
/* save config space */
retval = pci_save_state(pdev);
if (retval == 0) {
dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
__func__, retval);
/* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
dev_dbg(&pdev->dev,
"%s pci_enable_wake invoked successfully\n", __func__);
/* disable PCI device */
pci_disable_device(pdev);
dev_dbg(&pdev->dev,
"%s pci_disable_device invoked successfully\n",
__func__);
/* move device to D3hot state */
pci_set_power_state(pdev, PCI_D3hot);
dev_dbg(&pdev->dev,
"%s pci_set_power_state invoked successfully\n",
__func__);
} else {
dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
}
dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
return retval;
}
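/*
 * pch_spi_resume() - bring the device back to D0, restore its PCI state,
 * re-request the IRQ and reinitialize the controller in master mode.
 */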
static int pch_spi_resume(struct pci_dev *pdev)
{
int retval;
struct pch_spi_board_data *board = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
if (!board) {
dev_err(&pdev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
/* move device to D0 power state */
pci_set_power_state(pdev, PCI_D0);
/* restore state */
pci_restore_state(pdev);
retval = pci_enable_device(pdev);
if (retval < 0) {
dev_err(&pdev->dev,
"%s pci_enable_device failed\n", __func__);
} else {
/* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
/* register IRQ handler */
if (!board->irq_reg_sts) {
/* register IRQ */
retval = request_irq(board->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME,
board);
if (retval < 0) {
dev_err(&pdev->dev,
"%s request_irq failed\n", __func__);
return retval;
}
board->irq_reg_sts = true;
/* reset PCH SPI h/w */
pch_spi_reset(board->data->master);
pch_spi_set_master_mode(board->data->master);
/* set suspend status to false */
board->suspend_sts = false;
}
}
dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
return retval;
}
#else
#define pch_spi_suspend NULL
#define pch_spi_resume NULL
#endif
static struct pci_driver pch_spi_pcidev = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
.remove = pch_spi_remove,
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
static int __init pch_spi_init(void)
{
return pci_register_driver(&pch_spi_pcidev);
}
module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
pci_unregister_driver(&pch_spi_pcidev);
}
module_exit(pch_spi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
......@@ -228,6 +228,7 @@ enum ssp_chip_select {
};
struct dma_chan;
/**
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @num_chipselect: chipselects are used to distinguish individual
......@@ -235,11 +236,16 @@ enum ssp_chip_select {
* each slave has a chipselect signal, but it's common that not
* every chipselect is connected to a slave.
* @enable_dma: if true enables DMA driven transfers.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
*/
struct pl022_ssp_controller {
u16 bus_id;
u8 num_chipselect;
u8 enable_dma:1;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
};
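The DMA-related members above are filled in by machine code, not by the driver itself. A minimal sketch of what a board file might pass as platform data follows; the filter logic, the request-line values and the names board_dma_filter/ssp0_platform_data are illustrative assumptions, not part of this commit:

/* Hypothetical board-support sketch (assumes <linux/amba/pl022.h> and
 * <linux/dmaengine.h>); the matching rule depends on the dmaengine
 * driver actually used on the platform. */
static bool board_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* Assume the board encodes the wanted request line in filter_param. */
	return chan->chan_id == (unsigned long)filter_param;
}

static struct pl022_ssp_controller ssp0_platform_data = {
	.bus_id		= 0,
	.num_chipselect	= 4,
	.enable_dma	= 1,
	.dma_filter	= board_dma_filter,
	.dma_rx_param	= (void *)0,	/* example RX request line */
	.dma_tx_param	= (void *)1,	/* example TX request line */
};

The driver can then hand dma_filter together with dma_rx_param and dma_tx_param to dma_request_channel() when setting up its RX and TX channels.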
/**
......@@ -270,20 +276,13 @@ struct pl022_ssp_controller {
* @dma_config: DMA configuration for SSP controller and peripheral
*/
struct pl022_config_chip {
struct device *dev;
enum ssp_loopback lbm;
enum ssp_interface iface;
enum ssp_hierarchy hierarchy;
bool slave_tx_disable;
struct ssp_clock_params clk_freq;
enum ssp_rx_endian endian_rx;
enum ssp_tx_endian endian_tx;
enum ssp_data_size data_size;
enum ssp_mode com_mode;
enum ssp_rx_level_trig rx_lev_trig;
enum ssp_tx_level_trig tx_lev_trig;
enum ssp_spi_clk_phase clk_phase;
enum ssp_spi_clk_pol clk_pol;
enum ssp_microwire_ctrl_len ctrl_len;
enum ssp_microwire_wait_state wait_state;
enum ssp_duplex duplex;
......