Commit a9e3e12f authored by Olof Johansson

Merge tag 'soc-fsl-next-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.6

QUICC Engine drivers
- Improve the QE drivers to be compatible with ARM/ARM64/PPC64
  architectures (a sketch of the accessor approach is included below)
- Various cleanups to the QE drivers
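
The compatibility work above is largely a register-access conversion: the
hunks further down replace the PowerPC-only MMIO helpers (in_be32(),
out_be32(), setbits32(), clrsetbits_be32(), ...) with QE-local accessors
such as qe_ioread32be(), qe_iowrite32be() and qe_clrsetbits_be32(). The
definitions of those accessors are not part of the hunks shown here, so the
following is only a sketch of the general shape such wrappers take, built
on the generic ioread/iowrite API available on ARM/ARM64 as well as PowerPC:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: modelled on the accessors used throughout the diff below. */
static inline u32 qe_ioread32be(const void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void qe_iowrite32be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

static inline void qe_setbits_be32(void __iomem *addr, u32 set)
{
	qe_iowrite32be(qe_ioread32be(addr) | set, addr);
}

static inline void qe_clrsetbits_be32(void __iomem *addr, u32 clr, u32 set)
{
	qe_iowrite32be((qe_ioread32be(addr) & ~clr) | set, addr);
}

With these in place, an old construct like out_be32(&regs->cpdata, val)
becomes qe_iowrite32be(val, &regs->cpdata), which is exactly the
substitution visible in the qe_gpio and ucc hunks below.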

* tag 'soc-fsl-next-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux: (49 commits)
  soc: fsl: qe: remove set but not used variable 'mm_gc'
  soc: fsl: qe: remove PPC32 dependency from CONFIG_QUICC_ENGINE
  soc: fsl: qe: remove unused #include of asm/irq.h from ucc.c
  net: ethernet: freescale: make UCC_GETH explicitly depend on PPC32
  net/wan/fsl_ucc_hdlc: reject muram offsets above 64K
  net/wan/fsl_ucc_hdlc: fix reading of __be16 registers
  net/wan/fsl_ucc_hdlc: avoid use of IS_ERR_VALUE()
  soc: fsl: qe: avoid IS_ERR_VALUE in ucc_fast.c
  soc: fsl: qe: drop pointless check in qe_sdma_init()
  soc: fsl: qe: drop use of IS_ERR_VALUE in qe_sdma_init()
  soc: fsl: qe: avoid IS_ERR_VALUE in ucc_slow.c
  soc: fsl: qe: refactor cpm_muram_alloc_common to prevent BUG on error path
  soc: fsl: qe: drop broken lazy call of cpm_muram_init()
  soc: fsl: qe: make cpm_muram_free() ignore a negative offset
  soc: fsl: qe: make cpm_muram_free() return void
  soc: fsl: qe: change return type of cpm_muram_alloc() to s32
  serial: ucc_uart: access __be32 field using be32_to_cpu
  serial: ucc_uart: limit brg-frequency workaround to PPC32
  serial: ucc_uart: use of_property_read_u32() in ucc_uart_probe()
  serial: ucc_uart: stub out soft_uart_init for !CONFIG_PPC32
  ...

Link: https://lore.kernel.org/r/1578608351-23289-1-git-send-email-leoyang.li@nxp.com
Signed-off-by: Olof Johansson <olof@lixom.net>
parents 0f827273 6e62bd36
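
Several of the patches listed above also rework MURAM allocation error
handling, as the allocator hunks below show: cpm_muram_alloc() (and the
qe_muram_alloc() wrapper) now returns an s32 that is either a non-negative
offset or a negative errno, cpm_muram_free() returns void and ignores a
negative offset, and callers test "offset < 0" instead of using
IS_ERR_VALUE(). A hypothetical caller under the new convention (names and
sizes here are illustrative, not taken from the patch) looks roughly like
this:

#include <linux/device.h>
#include <soc/fsl/qe/qe.h>

/* Illustrative only: allocate a small MURAM area and release it. */
static int example_muram_user(struct device *dev)
{
	s32 pram_offset;

	/* 64 bytes, 32-byte aligned -- arbitrary values for the example */
	pram_offset = qe_muram_alloc(64, 32);
	if (pram_offset < 0) {
		dev_err(dev, "cannot allocate MURAM\n");
		return pram_offset;	/* already a negative errno */
	}

	/* ... program the hardware via qe_muram_addr(pram_offset) ... */

	/* Safe even on error paths where the offset was never allocated. */
	qe_muram_free(pram_offset);
	return 0;
}
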
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CPM_H
#define __CPM_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <soc/fsl/qe/qe.h>
/*
* SPI Parameter RAM common to QE and CPM.
*/
struct spi_pram {
__be16 rbase; /* Rx Buffer descriptor base address */
__be16 tbase; /* Tx Buffer descriptor base address */
u8 rfcr; /* Rx function code */
u8 tfcr; /* Tx function code */
__be16 mrblr; /* Max receive buffer length */
__be32 rstate; /* Internal */
__be32 rdp; /* Internal */
__be16 rbptr; /* Internal */
__be16 rbc; /* Internal */
__be32 rxtmp; /* Internal */
__be32 tstate; /* Internal */
__be32 tdp; /* Internal */
__be16 tbptr; /* Internal */
__be16 tbc; /* Internal */
__be32 txtmp; /* Internal */
__be32 res; /* Tx temp. */
__be16 rpbase; /* Relocation pointer (CPM1 only) */
__be16 res1; /* Reserved */
};
/*
* USB Controller pram common to QE and CPM.
*/
struct usb_ctlr {
u8 usb_usmod;
u8 usb_usadr;
u8 usb_uscom;
u8 res1[1];
__be16 usb_usep[4];
u8 res2[4];
__be16 usb_usber;
u8 res3[2];
__be16 usb_usbmr;
u8 res4[1];
u8 usb_usbs;
/* Fields down below are QE-only */
__be16 usb_ussft;
u8 res5[2];
__be16 usb_usfrn;
u8 res6[0x22];
} __attribute__ ((packed));
/*
* Function code bits, usually generic to devices.
*/
#ifdef CONFIG_CPM1
#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#else
#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
#endif
#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
/* Opcodes common to CPM1 and CPM2
*/
#define CPM_CR_INIT_TRX ((ushort)0x0000)
#define CPM_CR_INIT_RX ((ushort)0x0001)
#define CPM_CR_INIT_TX ((ushort)0x0002)
#define CPM_CR_HUNT_MODE ((ushort)0x0003)
#define CPM_CR_STOP_TX ((ushort)0x0004)
#define CPM_CR_GRA_STOP_TX ((ushort)0x0005)
#define CPM_CR_RESTART_TX ((ushort)0x0006)
#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007)
#define CPM_CR_SET_GADDR ((ushort)0x0008)
#define CPM_CR_SET_TIMER ((ushort)0x0008)
#define CPM_CR_STOP_IDMA ((ushort)0x000b)
/* Buffer descriptors used by many of the CPM protocols. */
typedef struct cpm_buf_desc {
ushort cbd_sc; /* Status and Control */
ushort cbd_datlen; /* Data length in buffer */
uint cbd_bufaddr; /* Buffer address in host memory */
} cbd_t;
/* Buffer descriptor control/status used by serial
*/
#define BD_SC_EMPTY (0x8000) /* Receive is empty */
#define BD_SC_READY (0x8000) /* Transmit is ready */
#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */
#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
#define BD_SC_LAST (0x0800) /* Last buffer in frame */
#define BD_SC_TC (0x0400) /* Transmit CRC */
#define BD_SC_CM (0x0200) /* Continuous mode */
#define BD_SC_ID (0x0100) /* Rec'd too many idles */
#define BD_SC_P (0x0100) /* xmt preamble */
#define BD_SC_BR (0x0020) /* Break received */
#define BD_SC_FR (0x0010) /* Framing error */
#define BD_SC_PR (0x0008) /* Parity error */
#define BD_SC_NAK (0x0004) /* NAK - did not respond */
#define BD_SC_OV (0x0002) /* Overrun */
#define BD_SC_UN (0x0002) /* Underrun */
#define BD_SC_CD (0x0001) /* */
#define BD_SC_CL (0x0001) /* Collision */
/* Buffer descriptor control/status used by Ethernet receive.
* Common to SCC and FCC.
*/
#define BD_ENET_RX_EMPTY (0x8000)
#define BD_ENET_RX_WRAP (0x2000)
#define BD_ENET_RX_INTR (0x1000)
#define BD_ENET_RX_LAST (0x0800)
#define BD_ENET_RX_FIRST (0x0400)
#define BD_ENET_RX_MISS (0x0100)
#define BD_ENET_RX_BC (0x0080) /* FCC Only */
#define BD_ENET_RX_MC (0x0040) /* FCC Only */
#define BD_ENET_RX_LG (0x0020)
#define BD_ENET_RX_NO (0x0010)
#define BD_ENET_RX_SH (0x0008)
#define BD_ENET_RX_CR (0x0004)
#define BD_ENET_RX_OV (0x0002)
#define BD_ENET_RX_CL (0x0001)
#define BD_ENET_RX_STATS (0x01ff) /* All status bits */
/* Buffer descriptor control/status used by Ethernet transmit.
* Common to SCC and FCC.
*/
#define BD_ENET_TX_READY (0x8000)
#define BD_ENET_TX_PAD (0x4000)
#define BD_ENET_TX_WRAP (0x2000)
#define BD_ENET_TX_INTR (0x1000)
#define BD_ENET_TX_LAST (0x0800)
#define BD_ENET_TX_TC (0x0400)
#define BD_ENET_TX_DEF (0x0200)
#define BD_ENET_TX_HB (0x0100)
#define BD_ENET_TX_LC (0x0080)
#define BD_ENET_TX_RL (0x0040)
#define BD_ENET_TX_RCMASK (0x003c)
#define BD_ENET_TX_UN (0x0002)
#define BD_ENET_TX_CSL (0x0001)
#define BD_ENET_TX_STATS (0x03ff) /* All status bits */
/* Buffer descriptor control/status used by Transparent mode SCC.
*/
#define BD_SCC_TX_LAST (0x0800)
/* Buffer descriptor control/status used by I2C.
*/
#define BD_I2C_START (0x0400)
#ifdef CONFIG_CPM
int cpm_command(u32 command, u8 opcode);
#else
static inline int cpm_command(u32 command, u8 opcode)
{
return -ENOSYS;
}
#endif /* CONFIG_CPM */
int cpm2_gpiochip_add32(struct device *dev);
#endif
#include <soc/fsl/cpm.h>
......@@ -34,7 +34,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include "mpc83xx.h"
......@@ -178,7 +177,7 @@ define_machine(mpc83xx_km) {
.name = "mpc83xx-km-platform",
.probe = mpc83xx_km_probe,
.setup_arch = mpc83xx_km_setup_arch,
.init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
......
......@@ -14,7 +14,6 @@
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/ipic.h>
#include <soc/fsl/qe/qe_ic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
......@@ -91,28 +90,6 @@ void __init mpc83xx_ipic_init_IRQ(void)
ipic_set_default_priority();
}
#ifdef CONFIG_QUICC_ENGINE
void __init mpc83xx_qe_init_IRQ(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np) {
np = of_find_node_by_type(NULL, "qeic");
if (!np)
return;
}
qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
of_node_put(np);
}
void __init mpc83xx_ipic_and_qe_init_IRQ(void)
{
mpc83xx_ipic_init_IRQ();
mpc83xx_qe_init_IRQ();
}
#endif /* CONFIG_QUICC_ENGINE */
static const struct of_device_id of_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
......
......@@ -33,7 +33,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include "mpc83xx.h"
......@@ -102,7 +101,7 @@ define_machine(mpc832x_mds) {
.name = "MPC832x MDS",
.probe = mpc832x_sys_probe,
.setup_arch = mpc832x_sys_setup_arch,
.init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
......
......@@ -22,7 +22,6 @@
#include <asm/ipic.h>
#include <asm/udbg.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
......@@ -220,7 +219,7 @@ define_machine(mpc832x_rdb) {
.name = "MPC832x RDB",
.probe = mpc832x_rdb_probe,
.setup_arch = mpc832x_rdb_setup_arch,
.init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
......
......@@ -40,7 +40,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include "mpc83xx.h"
......@@ -202,7 +201,7 @@ define_machine(mpc836x_mds) {
.name = "MPC836x MDS",
.probe = mpc836x_mds_probe,
.setup_arch = mpc836x_mds_setup_arch,
.init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
......
......@@ -17,7 +17,6 @@
#include <asm/ipic.h>
#include <asm/udbg.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
......@@ -42,7 +41,7 @@ define_machine(mpc836x_rdk) {
.name = "MPC836x RDK",
.probe = mpc836x_rdk_probe,
.setup_arch = mpc836x_rdk_setup_arch,
.init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
......
......@@ -72,13 +72,6 @@ extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
extern int mpc831x_usb_cfg(void);
extern void mpc83xx_ipic_init_IRQ(void);
#ifdef CONFIG_QUICC_ENGINE
extern void mpc83xx_qe_init_IRQ(void);
extern void mpc83xx_ipic_and_qe_init_IRQ(void);
#else
static inline void __init mpc83xx_qe_init_IRQ(void) {}
#define mpc83xx_ipic_and_qe_init_IRQ mpc83xx_ipic_init_IRQ
#endif /* CONFIG_QUICC_ENGINE */
#ifdef CONFIG_PCI
extern void mpc83xx_setup_pci(void);
......
......@@ -24,7 +24,6 @@
#include <asm/mpic.h>
#include <asm/ehv_pic.h>
#include <asm/swiotlb.h>
#include <soc/fsl/qe/qe_ic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
......@@ -38,8 +37,6 @@ void __init corenet_gen_pic_init(void)
unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
MPIC_NO_RESET;
struct device_node *np;
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
......@@ -47,13 +44,6 @@ void __init corenet_gen_pic_init(void)
BUG_ON(mpic == NULL);
mpic_init(mpic);
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (np) {
qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
qe_ic_cascade_high_mpic);
of_node_put(np);
}
}
/*
......
......@@ -44,7 +44,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include "smp.h"
......@@ -268,33 +267,8 @@ static void __init mpc85xx_mds_qe_init(void)
}
}
static void __init mpc85xx_mds_qeic_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!of_device_is_available(np)) {
of_node_put(np);
return;
}
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np) {
np = of_find_node_by_type(NULL, "qeic");
if (!np)
return;
}
if (machine_is(p1021_mds))
qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
qe_ic_cascade_high_mpic);
else
qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL);
of_node_put(np);
}
#else
static void __init mpc85xx_mds_qe_init(void) { }
static void __init mpc85xx_mds_qeic_init(void) { }
#endif /* CONFIG_QUICC_ENGINE */
static void __init mpc85xx_mds_setup_arch(void)
......@@ -364,7 +338,6 @@ static void __init mpc85xx_mds_pic_init(void)
BUG_ON(mpic == NULL);
mpic_init(mpic);
mpc85xx_mds_qeic_init();
}
static int __init mpc85xx_mds_probe(void)
......
......@@ -23,7 +23,6 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
......@@ -44,10 +43,6 @@ void __init mpc85xx_rdb_pic_init(void)
{
struct mpic *mpic;
#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
#endif
if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) {
mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
......@@ -62,18 +57,6 @@ void __init mpc85xx_rdb_pic_init(void)
BUG_ON(mpic == NULL);
mpic_init(mpic);
#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (np) {
qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
qe_ic_cascade_high_mpic);
of_node_put(np);
} else
pr_err("%s: Could not find qe-ic node\n", __func__);
#endif
}
/*
......
......@@ -19,7 +19,6 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
......@@ -31,26 +30,12 @@ static void __init twr_p1025_pic_init(void)
{
struct mpic *mpic;
#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
#endif
mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (np) {
qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
qe_ic_cascade_high_mpic);
of_node_put(np);
} else
pr_err("Could not find qe-ic node\n");
#endif
}
/* ************************************************************************
......
......@@ -74,7 +74,7 @@ config FSL_XGMAC_MDIO
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
---help---
......
......@@ -84,8 +84,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
int ret, i;
void *bd_buffer;
dma_addr_t bd_dma_addr;
u32 riptr;
u32 tiptr;
s32 riptr;
s32 tiptr;
u32 gumr;
ut_info = priv->ut_info;
......@@ -195,7 +195,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
if (priv->ucc_pram_offset < 0) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
......@@ -233,18 +233,23 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
if (IS_ERR_VALUE(riptr)) {
if (riptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
if (IS_ERR_VALUE(tiptr)) {
if (tiptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
}
if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
dev_err(priv->dev, "MURAM allocation out of addressable range\n");
ret = -ENOMEM;
goto free_tiptr;
}
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
......@@ -623,8 +628,8 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
if (howmany < budget) {
napi_complete_done(napi, howmany);
qe_setbits32(priv->uccf->p_uccm,
(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
qe_setbits_be32(priv->uccf->p_uccm,
(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
return howmany;
......@@ -732,8 +737,8 @@ static int uhdlc_open(struct net_device *dev)
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
qe_muram_free(priv->ucc_pram->riptr);
qe_muram_free(priv->ucc_pram->tiptr);
qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
......
......@@ -98,7 +98,7 @@ struct ucc_hdlc_private {
unsigned short tx_ring_size;
unsigned short rx_ring_size;
u32 ucc_pram_offset;
s32 ucc_pram_offset;
unsigned short encoding;
unsigned short parity;
......
......@@ -5,7 +5,8 @@
config QUICC_ENGINE
bool "QUICC Engine (QE) framework support"
depends on FSL_SOC && PPC32
depends on OF && HAS_IOMEM
depends on PPC || ARM || ARM64 || COMPILE_TEST
select GENERIC_ALLOCATOR
select CRC32
help
......
......@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
......@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
return !!(in_be32(&regs->cpdata) & pin_mask);
return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
}
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
......@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
else
qe_gc->cpdata &= ~pin_mask;
out_be32(&regs->cpdata, qe_gc->cpdata);
qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
......@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
}
}
out_be32(&regs->cpdata, qe_gc->cpdata);
qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
......@@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
{
struct qe_pin *qe_pin;
struct gpio_chip *gc;
struct of_mm_gpio_chip *mm_gc;
struct qe_gpio_chip *qe_gc;
int err;
unsigned long flags;
......@@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
goto err0;
}
mm_gc = to_of_mm_gpio_chip(gc);
qe_gc = gpiochip_get_data(gc);
spin_lock_irqsave(&qe_gc->lock, flags);
......@@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
spin_lock_irqsave(&qe_gc->lock, flags);
if (second_reg) {
clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
qe_clrsetbits_be32(&regs->cpdir2, mask2,
sregs->cpdir2 & mask2);
qe_clrsetbits_be32(&regs->cppar2, mask2,
sregs->cppar2 & mask2);
} else {
clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
qe_clrsetbits_be32(&regs->cpdir1, mask2,
sregs->cpdir1 & mask2);
qe_clrsetbits_be32(&regs->cppar1, mask2,
sregs->cppar1 & mask2);
}
if (sregs->cpdata & mask1)
......@@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
else
qe_gc->cpdata &= ~mask1;
out_be32(&regs->cpdata, qe_gc->cpdata);
clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
......
......@@ -22,16 +22,12 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/crc32.h>
#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
#include <asm/prom.h>
#include <asm/rheap.h>
static void qe_snums_init(void);
static int qe_sdma_init(void);
......@@ -108,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
u32 ret;
u32 val;
int ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
......@@ -129,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
out_be32(&qe_immr->cp.cecdr, cmd_input);
out_be32(&qe_immr->cp.cecr,
(cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
mcn_protocol << mcn_shift));
qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
&qe_immr->cp.cecr);
}
/* wait for the QE_CR_FLG to clear */
ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
100, 0);
/* On timeout (e.g. failure), the expression will be false (ret == 0),
otherwise it will be true (ret == 1). */
ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
(val & QE_CR_FLG) == 0, 0, 100);
/* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
spin_unlock_irqrestore(&qe_lock, flags);
return ret == 1;
return ret == 0;
}
EXPORT_SYMBOL(qe_issue_cmd);
......@@ -164,8 +159,7 @@ static unsigned int brg_clk = 0;
unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
int size;
const u32 *prop;
u32 brg;
unsigned int mod;
if (brg_clk)
......@@ -175,9 +169,8 @@ unsigned int qe_get_brg_clk(void)
if (!qe)
return brg_clk;
prop = of_get_property(qe, "brg-frequency", &size);
if (prop && size == sizeof(*prop))
brg_clk = *prop;
if (!of_property_read_u32(qe, "brg-frequency", &brg))
brg_clk = brg;
of_node_put(qe);
......@@ -197,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk);
#define PVR_VER_836x 0x8083
#define PVR_VER_832x 0x8084
static bool qe_general4_errata(void)
{
#ifdef CONFIG_PPC32
return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
#endif
return false;
}
/* Program the BRG to the given sampling rate and multiplier
*
* @brg: the BRG, QE_BRG1 - QE_BRG16
......@@ -223,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
if (qe_general4_errata())
if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
return 0;
}
......@@ -364,22 +365,20 @@ EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
struct sdma __iomem *sdma = &qe_immr->sdma;
static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
if (!sdma)
return -ENODEV;
static s32 sdma_buf_offset = -ENOMEM;
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
if (IS_ERR_VALUE(sdma_buf_offset)) {
if (sdma_buf_offset < 0) {
sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
if (IS_ERR_VALUE(sdma_buf_offset))
if (sdma_buf_offset < 0)
return -ENOMEM;
}
out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
(0x1 << QE_SDMR_CEN_SHIFT)));
qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
&sdma->sdebcr);
qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
&sdma->sdmr);
return 0;
}
......@@ -417,14 +416,14 @@ static void qe_upload_microcode(const void *base,
"uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
&qe_immr->iram.iadd);
for (i = 0; i < be32_to_cpu(ucode->count); i++)
out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
}
/*
......@@ -509,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
* If the microcode calls for it, split the I-RAM.
*/
if (!firmware->split)
setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
if (firmware->soc.model)
printk(KERN_INFO
......@@ -543,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
out_be32(&qe_immr->rsp[i].tibcr[j], trap);
qe_iowrite32be(trap,
&qe_immr->rsp[i].tibcr[j]);
}
/* Enable traps */
out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
qe_iowrite32be(be32_to_cpu(ucode->eccr),
&qe_immr->rsp[i].eccr);
}
qe_firmware_uploaded = 1;
......@@ -565,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware);
struct qe_firmware_info *qe_get_firmware_info(void)
{
static int initialized;
struct property *prop;
struct device_node *qe;
struct device_node *fw = NULL;
const char *sprop;
unsigned int i;
/*
* If we haven't checked yet, and a driver hasn't uploaded a firmware
......@@ -603,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void)
strlcpy(qe_firmware_info.id, sprop,
sizeof(qe_firmware_info.id));
prop = of_find_property(fw, "extended-modes", NULL);
if (prop && (prop->length == sizeof(u64))) {
const u64 *iprop = prop->value;
qe_firmware_info.extended_modes = *iprop;
}
of_property_read_u64(fw, "extended-modes",
&qe_firmware_info.extended_modes);
prop = of_find_property(fw, "virtual-traps", NULL);
if (prop && (prop->length == 32)) {
const u32 *iprop = prop->value;
for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
qe_firmware_info.vtraps[i] = iprop[i];
}
of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
ARRAY_SIZE(qe_firmware_info.vtraps));
of_node_put(fw);
......@@ -627,17 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info);
unsigned int qe_get_num_of_risc(void)
{
struct device_node *qe;
int size;
unsigned int num_of_risc = 0;
const u32 *prop;
qe = qe_get_device_node();
if (!qe)
return num_of_risc;
prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
if (prop && size == sizeof(*prop))
num_of_risc = *prop;
of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
of_node_put(qe);
......
......@@ -32,7 +32,7 @@ static phys_addr_t muram_pbase;
struct muram_block {
struct list_head head;
unsigned long start;
s32 start;
int size;
};
......@@ -110,34 +110,30 @@ int cpm_muram_init(void)
* @algo: algorithm for alloc.
* @data: data for genalloc's algorithm.
*
* This function returns an offset into the muram area.
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure.
*/
static unsigned long cpm_muram_alloc_common(unsigned long size,
genpool_algo_t algo, void *data)
static s32 cpm_muram_alloc_common(unsigned long size,
genpool_algo_t algo, void *data)
{
struct muram_block *entry;
unsigned long start;
if (!muram_pool && cpm_muram_init())
goto out2;
s32 start;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
if (!start)
goto out2;
if (!start) {
kfree(entry);
return -ENOMEM;
}
start = start - GENPOOL_OFFSET;
memset_io(cpm_muram_addr(start), 0, size);
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto out1;
entry->start = start;
entry->size = size;
list_add(&entry->head, &muram_block_list);
return start;
out1:
gen_pool_free(muram_pool, start, size);
out2:
return (unsigned long)-ENOMEM;
}
/*
......@@ -145,13 +141,14 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
* @size: number of bytes to allocate
* @align: requested alignment, in bytes
*
* This function returns an offset into the muram area.
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
unsigned long start;
s32 start;
unsigned long flags;
struct genpool_data_align muram_pool_data;
......@@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc);
* cpm_muram_free - free a chunk of multi-user ram
* @offset: The beginning of the chunk as returned by cpm_muram_alloc().
*/
int cpm_muram_free(unsigned long offset)
void cpm_muram_free(s32 offset)
{
unsigned long flags;
int size;
struct muram_block *tmp;
if (offset < 0)
return;
size = 0;
spin_lock_irqsave(&cpm_muram_lock, flags);
list_for_each_entry(tmp, &muram_block_list, head) {
......@@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset)
}
gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
spin_unlock_irqrestore(&cpm_muram_lock, flags);
return size;
}
EXPORT_SYMBOL(cpm_muram_free);
......@@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free);
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
* @size: number of bytes to allocate
* This function returns an offset into the muram area
* This function returns @offset if the area was available, a negative
* errno otherwise.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
unsigned long start;
s32 start;
unsigned long flags;
struct genpool_data_fixed muram_pool_data_fixed;
......
......@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
......@@ -24,9 +25,57 @@
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe_ic.h>
#include <soc/fsl/qe/qe.h>
#define NR_QE_IC_INTS 64
/* QE IC registers offset */
#define QEIC_CICR 0x00
#define QEIC_CIVEC 0x04
#define QEIC_CIPXCC 0x10
#define QEIC_CIPYCC 0x14
#define QEIC_CIPWCC 0x18
#define QEIC_CIPZCC 0x1c
#define QEIC_CIMR 0x20
#define QEIC_CRIMR 0x24
#define QEIC_CIPRTA 0x30
#define QEIC_CIPRTB 0x34
#define QEIC_CHIVEC 0x60
struct qe_ic {
/* Control registers offset */
u32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
/* VIRQ numbers of QE high/low irqs */
unsigned int virq_high;
unsigned int virq_low;
};
#include "qe_ic.h"
/*
* QE interrupt controller internal structure
*/
struct qe_ic_info {
/* Location of this source at the QIMR register */
u32 mask;
/* Mask register offset */
u32 mask_reg;
/*
* For grouped interrupts sources - the interrupt code as
* appears at the group priority register
*/
u8 pri_code;
/* Group priority register offset */
u32 pri_reg;
};
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
......@@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = {
},
};
static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
return in_be32(base + (reg >> 2));
return qe_ioread32be(base + (reg >> 2));
}
static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
u32 value)
{
out_be32(base + (reg >> 2), value);
qe_iowrite32be(value, base + (reg >> 2));
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
......@@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
int irq;
......@@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
if (irq == 0)
return NO_IRQ;
return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
/* Return an interrupt vector or 0 if no interrupt is pending. */
static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
int irq;
......@@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
if (irq == 0)
return NO_IRQ;
return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
void __init qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(struct irq_desc *desc),
void (*high_handler)(struct irq_desc *desc))
static void qe_ic_cascade_low(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != 0)
generic_handle_irq(cascade_irq);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
static void qe_ic_cascade_high(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != 0)
generic_handle_irq(cascade_irq);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
}
static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == 0)
cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != 0)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static void __init qe_ic_init(struct device_node *node)
{
void (*low_handler)(struct irq_desc *desc);
void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
struct resource res;
u32 temp = 0, ret, high_active = 0;
u32 ret;
ret = of_address_to_resource(node, 0, &res);
if (ret)
......@@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
qe_ic->virq_low = irq_of_parse_and_map(node, 1);
if (qe_ic->virq_low == NO_IRQ) {
if (!qe_ic->virq_low) {
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
kfree(qe_ic);
return;
}
/* default priority scheme is grouped. If spread mode is */
/* required, configure cicr accordingly. */
if (flags & QE_IC_SPREADMODE_GRP_W)
temp |= CICR_GWCC;
if (flags & QE_IC_SPREADMODE_GRP_X)
temp |= CICR_GXCC;
if (flags & QE_IC_SPREADMODE_GRP_Y)
temp |= CICR_GYCC;
if (flags & QE_IC_SPREADMODE_GRP_Z)
temp |= CICR_GZCC;
if (flags & QE_IC_SPREADMODE_GRP_RISCA)
temp |= CICR_GRTA;
if (flags & QE_IC_SPREADMODE_GRP_RISCB)
temp |= CICR_GRTB;
/* choose destination signal for highest priority interrupt */
if (flags & QE_IC_HIGH_SIGNAL) {
temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
high_active = 1;
if (qe_ic->virq_high != qe_ic->virq_low) {
low_handler = qe_ic_cascade_low;
high_handler = qe_ic_cascade_high;
} else {
low_handler = qe_ic_cascade_muxed_mpic;
high_handler = NULL;
}
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
if (qe_ic->virq_high != NO_IRQ &&
qe_ic->virq_high != qe_ic->virq_low) {
if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
}
void qe_ic_set_highest_priority(unsigned int virq, int high)
static int __init qe_ic_of_init(void)
{
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
u32 temp = 0;
temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
temp &= ~CICR_HP_MASK;
temp |= src << CICR_HP_SHIFT;
temp &= ~CICR_HPIT_MASK;
temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}
/* Set Priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
u32 temp;
struct device_node *np;
if (priority > 8 || priority == 0)
return -EINVAL;
if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
"%s: Invalid hw irq number for QEIC\n", __func__))
return -EINVAL;
if (qe_ic_info[src].pri_reg == 0)
return -EINVAL;
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
if (priority < 4) {
temp &= ~(0x7 << (32 - priority * 3));
temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
} else {
temp &= ~(0x7 << (24 - priority * 3));
temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np) {
np = of_find_node_by_type(NULL, "qeic");
if (!np)
return -ENODEV;
}
qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
qe_ic_init(np);
of_node_put(np);
return 0;
}
/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
u32 temp, control_reg = QEIC_CICNR, shift = 0;
if (priority > 2 || priority == 0)
return -EINVAL;
if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
"%s: Invalid hw irq number for QEIC\n", __func__))
return -EINVAL;
switch (qe_ic_info[src].pri_reg) {
case QEIC_CIPZCC:
shift = CICNR_ZCC1T_SHIFT;
break;
case QEIC_CIPWCC:
shift = CICNR_WCC1T_SHIFT;
break;
case QEIC_CIPYCC:
shift = CICNR_YCC1T_SHIFT;
break;
case QEIC_CIPXCC:
shift = CICNR_XCC1T_SHIFT;
break;
case QEIC_CIPRTA:
shift = CRICR_RTA1T_SHIFT;
control_reg = QEIC_CRICR;
break;
case QEIC_CIPRTB:
shift = CRICR_RTB1T_SHIFT;
control_reg = QEIC_CRICR;
break;
default:
return -EINVAL;
}
shift += (2 - priority) * 2;
temp = qe_ic_read(qe_ic->regs, control_reg);
temp &= ~(SIGNAL_MASK << shift);
temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
qe_ic_write(qe_ic->regs, control_reg, temp);
return 0;
}
static struct bus_type qe_ic_subsys = {
.name = "qe_ic",
.dev_name = "qe_ic",
};
static struct device device_qe_ic = {
.id = 0,
.bus = &qe_ic_subsys,
};
static int __init init_qe_ic_sysfs(void)
{
int rc;
printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
rc = subsys_system_register(&qe_ic_subsys, NULL);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys class\n");
return -ENODEV;
}
rc = device_register(&device_qe_ic);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys device\n");
return -ENODEV;
}
return 0;
}
subsys_initcall(init_qe_ic_sysfs);
subsys_initcall(qe_ic_of_init);
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* drivers/soc/fsl/qe/qe_ic.h
*
* QUICC ENGINE Interrupt Controller Header
*
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <leoli@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
*/
#ifndef _POWERPC_SYSDEV_QE_IC_H
#define _POWERPC_SYSDEV_QE_IC_H
#include <soc/fsl/qe/qe_ic.h>
#define NR_QE_IC_INTS 64
/* QE IC registers offset */
#define QEIC_CICR 0x00
#define QEIC_CIVEC 0x04
#define QEIC_CRIPNR 0x08
#define QEIC_CIPNR 0x0c
#define QEIC_CIPXCC 0x10
#define QEIC_CIPYCC 0x14
#define QEIC_CIPWCC 0x18
#define QEIC_CIPZCC 0x1c
#define QEIC_CIMR 0x20
#define QEIC_CRIMR 0x24
#define QEIC_CICNR 0x28
#define QEIC_CIPRTA 0x30
#define QEIC_CIPRTB 0x34
#define QEIC_CRICR 0x3c
#define QEIC_CHIVEC 0x60
/* Interrupt priority registers */
#define CIPCC_SHIFT_PRI0 29
#define CIPCC_SHIFT_PRI1 26
#define CIPCC_SHIFT_PRI2 23
#define CIPCC_SHIFT_PRI3 20
#define CIPCC_SHIFT_PRI4 13
#define CIPCC_SHIFT_PRI5 10
#define CIPCC_SHIFT_PRI6 7
#define CIPCC_SHIFT_PRI7 4
/* CICR priority modes */
#define CICR_GWCC 0x00040000
#define CICR_GXCC 0x00020000
#define CICR_GYCC 0x00010000
#define CICR_GZCC 0x00080000
#define CICR_GRTA 0x00200000
#define CICR_GRTB 0x00400000
#define CICR_HPIT_SHIFT 8
#define CICR_HPIT_MASK 0x00000300
#define CICR_HP_SHIFT 24
#define CICR_HP_MASK 0x3f000000
/* CICNR */
#define CICNR_WCC1T_SHIFT 20
#define CICNR_ZCC1T_SHIFT 28
#define CICNR_YCC1T_SHIFT 12
#define CICNR_XCC1T_SHIFT 4
/* CRICR */
#define CRICR_RTA1T_SHIFT 20
#define CRICR_RTB1T_SHIFT 28
/* Signal indicator */
#define SIGNAL_MASK 3
#define SIGNAL_HIGH 2
#define SIGNAL_LOW 0
struct qe_ic {
/* Control registers offset */
volatile u32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
/* VIRQ numbers of QE high/low irqs */
unsigned int virq_high;
unsigned int virq_low;
};
/*
* QE interrupt controller internal structure
*/
struct qe_ic_info {
u32 mask; /* location of this source at the QIMR register. */
u32 mask_reg; /* Mask register offset */
u8 pri_code; /* for grouped interrupts sources - the interrupt
code as appears at the group priority register */
u32 pri_reg; /* Group priority register offset */
};
#endif /* _POWERPC_SYSDEV_QE_IC_H */
......@@ -18,8 +18,6 @@
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#undef DEBUG
......@@ -30,7 +28,7 @@ int par_io_init(struct device_node *np)
{
struct resource res;
int ret;
const u32 *num_ports;
u32 num_ports;
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np, 0, &res);
......@@ -38,9 +36,8 @@ int par_io_init(struct device_node *np)
return ret;
par_io = ioremap(res.start, resource_size(&res));
num_ports = of_get_property(np, "num-ports", NULL);
if (num_ports)
num_par_io_ports = *num_ports;
if (!of_property_read_u32(np, "num-ports", &num_ports))
num_par_io_ports = num_ports;
return 0;
}
......@@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
tmp_val = in_be32(&par_io->cpodr);
tmp_val = qe_ioread32be(&par_io->cpodr);
if (open_drain)
out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
else
out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
/* define direction */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
in_be32(&par_io->cpdir2) :
in_be32(&par_io->cpdir1);
qe_ioread32be(&par_io->cpdir2) :
qe_ioread32be(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
......@@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
out_be32(&par_io->cpdir2,
~pin_mask2bits & tmp_val);
qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
tmp_val &= ~pin_mask2bits;
out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
} else {
out_be32(&par_io->cpdir1,
~pin_mask2bits & tmp_val);
qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
tmp_val &= ~pin_mask2bits;
out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
}
/* define pin assignment */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
in_be32(&par_io->cppar2) :
in_be32(&par_io->cppar1);
qe_ioread32be(&par_io->cppar2) :
qe_ioread32be(&par_io->cppar1);
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
(pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
out_be32(&par_io->cppar2,
~pin_mask2bits & tmp_val);
qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
tmp_val &= ~pin_mask2bits;
out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
} else {
out_be32(&par_io->cppar1,
~pin_mask2bits & tmp_val);
qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
tmp_val &= ~pin_mask2bits;
out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
}
}
EXPORT_SYMBOL(__par_io_config_pin);
......@@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
/* calculate pin location */
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
tmp_val = in_be32(&par_io[port].cpdata);
tmp_val = qe_ioread32be(&par_io[port].cpdata);
if (val == 0) /* clear */
out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
else /* set */
out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
return 0;
}
......@@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set);
int par_io_of_config(struct device_node *np)
{
struct device_node *pio;
const phandle *ph;
int pio_map_len;
const unsigned int *pio_map;
const __be32 *pio_map;
if (par_io == NULL) {
printk(KERN_ERR "par_io not initialized\n");
return -1;
}
ph = of_get_property(np, "pio-handle", NULL);
if (ph == NULL) {
pio = of_parse_phandle(np, "pio-handle", 0);
if (pio == NULL) {
printk(KERN_ERR "pio-handle not available\n");
return -1;
}
pio = of_find_node_by_phandle(*ph);
pio_map = of_get_property(pio, "pio-map", &pio_map_len);
if (pio_map == NULL) {
printk(KERN_ERR "pio-map is not set!\n");
......@@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np)
}
while (pio_map_len > 0) {
par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
(int) pio_map[2], (int) pio_map[3],
(int) pio_map[4], (int) pio_map[5]);
u8 port = be32_to_cpu(pio_map[0]);
u8 pin = be32_to_cpu(pio_map[1]);
int dir = be32_to_cpu(pio_map[2]);
int open_drain = be32_to_cpu(pio_map[3]);
int assignment = be32_to_cpu(pio_map[4]);
int has_irq = be32_to_cpu(pio_map[5]);
par_io_config_pin(port, pin, dir, open_drain,
assignment, has_irq);
pio_map += 6;
pio_map_len -= 6;
}
......
......@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
&siram[siram_entry_id * 32 + 0x200 + i]);
}
setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
SIR_LAST);
setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
SIR_LAST);
qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
SIR_LAST);
qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
SIR_LAST);
/* Set SIxMR register */
sixmr = SIMR_SAD(siram_entry_id);
......
......@@ -15,7 +15,6 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
......@@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
return -EINVAL;
spin_lock_irqsave(&cmxgcr_lock, flags);
clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
return 0;
......@@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
return -EINVAL;
}
clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
UCC_GUEMR_SET_RESERVED3 | speed);
qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
UCC_GUEMR_SET_RESERVED3 | speed);
return 0;
}
......@@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
if (set)
setbits32(cmxucr, mask << shift);
qe_setbits_be32(cmxucr, mask << shift);
else
clrbits32(cmxucr, mask << shift);
qe_clrbits_be32(cmxucr, mask << shift);
return 0;
}
......@@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
if (mode == COMM_DIR_RX)
shift += 4;
clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
clock_bits << shift);
qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
clock_bits << shift);
return 0;
}
......@@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
&qe_mux_reg->cmxsi1cr_h;
qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
clock_bits << shift);
qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
clock_bits << shift);
return 0;
}
......@@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
shift = ucc_get_tdm_sync_shift(mode, tdm_num);
qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
QE_CMXUCR_TX_CLK_SRC_MASK << shift,
source << shift);
qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
QE_CMXUCR_TX_CLK_SRC_MASK << shift,
source << shift);
return 0;
}
......@@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
&uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
&uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
&uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
&uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
&uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
&uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
&uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
&uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
&uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
&uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
&uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
&uccf->uf_regs->urfset,
qe_ioread16be(&uccf->uf_regs->urfset));
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
&uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
&uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
&uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
&uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
&uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
&uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
&uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
&uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
......@@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
......@@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Enable reception and/or transmission on this UCC. */
gumr = in_be32(&uf_regs->gumr);
gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr |= UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 1;
......@@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr |= UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 1;
}
out_be32(&uf_regs->gumr, gumr);
qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
......@@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Disable reception and/or transmission on this UCC. */
gumr = in_be32(&uf_regs->gumr);
gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr &= ~UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 0;
......@@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr &= ~UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 0;
}
out_be32(&uf_regs->gumr, gumr);
qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
......@@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
__func__);
return -ENOMEM;
}
uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
......@@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
gumr |= uf_info->tenc;
gumr |= uf_info->tcrc;
gumr |= uf_info->mode;
out_be32(&uf_regs->gumr, gumr);
qe_iowrite32be(gumr, &uf_regs->gumr);
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
__func__);
uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
......@@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
qe_muram_alloc(uf_info->urfs +
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
__func__);
uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
/* Set Virtual Fifo registers */
out_be16(&uf_regs->urfs, uf_info->urfs);
out_be16(&uf_regs->urfet, uf_info->urfet);
out_be16(&uf_regs->urfset, uf_info->urfset);
out_be16(&uf_regs->utfs, uf_info->utfs);
out_be16(&uf_regs->utfet, uf_info->utfet);
out_be16(&uf_regs->utftt, uf_info->utftt);
qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
/* utfb, urfb are offsets from MURAM base */
out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
&uf_regs->utfb);
qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
&uf_regs->urfb);
/* Mux clocking */
/* Grant Support */
......@@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
}
/* Set interrupt mask register at UCC level. */
out_be32(&uf_regs->uccm, uf_info->uccm_mask);
qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
out_be32(&uf_regs->ucce, 0xffffffff);
qe_iowrite32be(0xffffffff, &uf_regs->ucce);
*uccf_ret = uccf;
return 0;
......@@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf)
if (!uccf)
return;
if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
if (uccf->uf_regs)
iounmap(uccf->uf_regs);
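For context, a minimal sketch (not part of this diff) of the allocation pattern ucc_fast.c switches to above: MURAM offsets are kept as s32, pre-set to -1, checked with a plain "< 0" test instead of IS_ERR_VALUE(), and handed unconditionally to qe_muram_free() in the cleanup path, which is safe because cpm_muram_free() now ignores negative offsets. The struct and function names below are hypothetical.

#include <linux/errno.h>
#include <soc/fsl/qe/qe.h>

struct example_priv {				/* hypothetical driver state */
	s32 muram_offset;
};

static void example_muram_teardown(struct example_priv *p)
{
	qe_muram_free(p->muram_offset);		/* no-op while the offset is still -1 */
}

static int example_muram_setup(struct example_priv *p, unsigned long size)
{
	p->muram_offset = -1;			/* sentinel: nothing allocated yet */

	p->muram_offset = qe_muram_alloc(size, 64);
	if (p->muram_offset < 0) {		/* s32 error code, not IS_ERR_VALUE() */
		example_muram_teardown(p);	/* shared cleanup path stays safe */
		return -ENOMEM;
	}
	return 0;
}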
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Enable reception and/or transmission on this UCC. */
gumr_l = in_be32(&us_regs->gumr_l);
gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l |= UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l |= UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 1;
}
out_be32(&us_regs->gumr_l, gumr_l);
qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Disable reception and/or transmission on this UCC. */
gumr_l = in_be32(&us_regs->gumr_l);
gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 0;
}
out_be32(&us_regs->gumr_l, gumr_l);
qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);
@@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
__func__);
return -ENOMEM;
}
uccs->rx_base_offset = -1;
uccs->tx_base_offset = -1;
uccs->us_pram_offset = -1;
/* Fill slow UCC structure */
uccs->us_info = us_info;
@@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* Get PRAM base */
uccs->us_pram_offset =
qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
if (IS_ERR_VALUE(uccs->us_pram_offset)) {
if (uccs->us_pram_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
ucc_slow_free(uccs);
return -ENOMEM;
@@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return ret;
}
out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
INIT_LIST_HEAD(&uccs->confQ);
@@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->rx_base_offset =
qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
if (IS_ERR_VALUE(uccs->rx_base_offset)) {
if (uccs->rx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
us_info->rx_bd_ring_len);
uccs->rx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->tx_base_offset =
qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
if (IS_ERR_VALUE(uccs->tx_base_offset)) {
if (uccs->tx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
uccs->tx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
out_be32(&bd->buf, 0);
qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
out_be32((u32 *) bd, 0);
qe_iowrite32be(0, (u32 *)bd);
bd++;
}
/* for last BD set Wrap bit */
out_be32(&bd->buf, 0);
out_be32((u32 *) bd, cpu_to_be32(T_W));
qe_iowrite32be(0, &bd->buf);
qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
out_be32((u32*)bd, 0);
qe_iowrite32be(0, (u32 *)bd);
/* clear bd buffer */
out_be32(&bd->buf, 0);
qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
out_be32((u32*)bd, cpu_to_be32(R_W));
out_be32(&bd->buf, 0);
qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
@@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_H_TXSY;
if (us_info->rtsm)
gumr |= UCC_SLOW_GUMR_H_RTSM;
out_be32(&us_regs->gumr_h, gumr);
qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
out_be32(&us_regs->gumr_l, gumr);
qe_iowrite32be(gumr, &us_regs->gumr_l);
/* Function code registers */
@@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
/* rbase, tbase are offsets from MURAM base */
out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
/* Mux clocking */
/* Grant Support */
@@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
}
/* Set interrupt mask register at UCC level. */
out_be16(&us_regs->uccm, us_info->uccm_mask);
qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
out_be16(&us_regs->ucce, 0xffff);
qe_iowrite16be(0xffff, &us_regs->ucce);
/* Issue QE Init command */
if (us_info->init_tx && us_info->init_rx)
@@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
if (!uccs)
return;
if (uccs->rx_base_offset)
qe_muram_free(uccs->rx_base_offset);
if (uccs->tx_base_offset)
qe_muram_free(uccs->tx_base_offset);
if (uccs->us_pram)
qe_muram_free(uccs->us_pram_offset);
qe_muram_free(uccs->rx_base_offset);
qe_muram_free(uccs->tx_base_offset);
qe_muram_free(uccs->us_pram_offset);
if (uccs->us_regs)
iounmap(uccs->us_regs);
@@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate)
spin_lock_irqsave(&cmxgcr_lock, flags);
clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CPM_H
#define __CPM_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <soc/fsl/qe/qe.h>
/*
* SPI Parameter RAM common to QE and CPM.
*/
struct spi_pram {
__be16 rbase; /* Rx Buffer descriptor base address */
__be16 tbase; /* Tx Buffer descriptor base address */
u8 rfcr; /* Rx function code */
u8 tfcr; /* Tx function code */
__be16 mrblr; /* Max receive buffer length */
__be32 rstate; /* Internal */
__be32 rdp; /* Internal */
__be16 rbptr; /* Internal */
__be16 rbc; /* Internal */
__be32 rxtmp; /* Internal */
__be32 tstate; /* Internal */
__be32 tdp; /* Internal */
__be16 tbptr; /* Internal */
__be16 tbc; /* Internal */
__be32 txtmp; /* Internal */
__be32 res; /* Tx temp. */
__be16 rpbase; /* Relocation pointer (CPM1 only) */
__be16 res1; /* Reserved */
};
/*
* USB Controller pram common to QE and CPM.
*/
struct usb_ctlr {
u8 usb_usmod;
u8 usb_usadr;
u8 usb_uscom;
u8 res1[1];
__be16 usb_usep[4];
u8 res2[4];
__be16 usb_usber;
u8 res3[2];
__be16 usb_usbmr;
u8 res4[1];
u8 usb_usbs;
/* Fields down below are QE-only */
__be16 usb_ussft;
u8 res5[2];
__be16 usb_usfrn;
u8 res6[0x22];
} __attribute__ ((packed));
/*
* Function code bits, usually generic to devices.
*/
#ifdef CONFIG_CPM1
#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
#else
#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
#endif
#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
/* Opcodes common to CPM1 and CPM2
*/
#define CPM_CR_INIT_TRX ((ushort)0x0000)
#define CPM_CR_INIT_RX ((ushort)0x0001)
#define CPM_CR_INIT_TX ((ushort)0x0002)
#define CPM_CR_HUNT_MODE ((ushort)0x0003)
#define CPM_CR_STOP_TX ((ushort)0x0004)
#define CPM_CR_GRA_STOP_TX ((ushort)0x0005)
#define CPM_CR_RESTART_TX ((ushort)0x0006)
#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007)
#define CPM_CR_SET_GADDR ((ushort)0x0008)
#define CPM_CR_SET_TIMER ((ushort)0x0008)
#define CPM_CR_STOP_IDMA ((ushort)0x000b)
/* Buffer descriptors used by many of the CPM protocols. */
typedef struct cpm_buf_desc {
ushort cbd_sc; /* Status and Control */
ushort cbd_datlen; /* Data length in buffer */
uint cbd_bufaddr; /* Buffer address in host memory */
} cbd_t;
/* Buffer descriptor control/status used by serial
*/
#define BD_SC_EMPTY (0x8000) /* Receive is empty */
#define BD_SC_READY (0x8000) /* Transmit is ready */
#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */
#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
#define BD_SC_LAST (0x0800) /* Last buffer in frame */
#define BD_SC_TC (0x0400) /* Transmit CRC */
#define BD_SC_CM (0x0200) /* Continuous mode */
#define BD_SC_ID (0x0100) /* Rec'd too many idles */
#define BD_SC_P (0x0100) /* xmt preamble */
#define BD_SC_BR (0x0020) /* Break received */
#define BD_SC_FR (0x0010) /* Framing error */
#define BD_SC_PR (0x0008) /* Parity error */
#define BD_SC_NAK (0x0004) /* NAK - did not respond */
#define BD_SC_OV (0x0002) /* Overrun */
#define BD_SC_UN (0x0002) /* Underrun */
#define BD_SC_CD (0x0001) /* */
#define BD_SC_CL (0x0001) /* Collision */
/* Buffer descriptor control/status used by Ethernet receive.
* Common to SCC and FCC.
*/
#define BD_ENET_RX_EMPTY (0x8000)
#define BD_ENET_RX_WRAP (0x2000)
#define BD_ENET_RX_INTR (0x1000)
#define BD_ENET_RX_LAST (0x0800)
#define BD_ENET_RX_FIRST (0x0400)
#define BD_ENET_RX_MISS (0x0100)
#define BD_ENET_RX_BC (0x0080) /* FCC Only */
#define BD_ENET_RX_MC (0x0040) /* FCC Only */
#define BD_ENET_RX_LG (0x0020)
#define BD_ENET_RX_NO (0x0010)
#define BD_ENET_RX_SH (0x0008)
#define BD_ENET_RX_CR (0x0004)
#define BD_ENET_RX_OV (0x0002)
#define BD_ENET_RX_CL (0x0001)
#define BD_ENET_RX_STATS (0x01ff) /* All status bits */
/* Buffer descriptor control/status used by Ethernet transmit.
* Common to SCC and FCC.
*/
#define BD_ENET_TX_READY (0x8000)
#define BD_ENET_TX_PAD (0x4000)
#define BD_ENET_TX_WRAP (0x2000)
#define BD_ENET_TX_INTR (0x1000)
#define BD_ENET_TX_LAST (0x0800)
#define BD_ENET_TX_TC (0x0400)
#define BD_ENET_TX_DEF (0x0200)
#define BD_ENET_TX_HB (0x0100)
#define BD_ENET_TX_LC (0x0080)
#define BD_ENET_TX_RL (0x0040)
#define BD_ENET_TX_RCMASK (0x003c)
#define BD_ENET_TX_UN (0x0002)
#define BD_ENET_TX_CSL (0x0001)
#define BD_ENET_TX_STATS (0x03ff) /* All status bits */
/* Buffer descriptor control/status used by Transparent mode SCC.
*/
#define BD_SCC_TX_LAST (0x0800)
/* Buffer descriptor control/status used by I2C.
*/
#define BD_I2C_START (0x0400)
#ifdef CONFIG_CPM
int cpm_command(u32 command, u8 opcode);
#else
static inline int cpm_command(u32 command, u8 opcode)
{
return -ENOSYS;
}
#endif /* CONFIG_CPM */
int cpm2_gpiochip_add32(struct device *dev);
#endif
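As an illustration only (not part of this diff), here is roughly how the cbd_t layout and BD_SC_* bits defined above are used to walk a receive ring; bd_ring and ring_len are hypothetical, the processing step is elided, and the PPC-only in_be16()/out_be16() accessors are acceptable here because CPM1/CPM2 remain PPC32-only.

static void example_drain_rx(cbd_t __iomem *bd_ring, int ring_len)
{
	cbd_t __iomem *bd = bd_ring;
	int i;

	for (i = 0; i < ring_len; i++) {
		ushort sc = in_be16(&bd->cbd_sc);

		if (sc & BD_SC_EMPTY)		/* still owned by the CPM, nothing to collect */
			break;
		/* ... process in_be16(&bd->cbd_datlen) bytes from the buffer at cbd_bufaddr ... */
		out_be16(&bd->cbd_sc, (sc & BD_SC_WRAP) | BD_SC_EMPTY);	/* hand BD back */
		bd = (sc & BD_SC_WRAP) ? bd_ring : bd + 1;	/* wrap at the last descriptor */
	}
}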
@@ -17,7 +17,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cpm.h>
#include <soc/fsl/cpm.h>
#include <soc/fsl/qe/immap_qe.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -98,26 +98,25 @@ static inline void qe_reset(void) {}
int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
int cpm_muram_free(unsigned long offset);
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
s32 cpm_muram_alloc(unsigned long size, unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
#else
static inline unsigned long cpm_muram_alloc(unsigned long size,
unsigned long align)
static inline s32 cpm_muram_alloc(unsigned long size,
unsigned long align)
{
return -ENOSYS;
}
static inline int cpm_muram_free(unsigned long offset)
static inline void cpm_muram_free(s32 offset)
{
return -ENOSYS;
}
static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
unsigned long size)
static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
unsigned long size)
{
return -ENOSYS;
}
@@ -241,21 +240,37 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_offset cpm_muram_offset
#define qe_muram_dma cpm_muram_dma
#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
#ifdef CONFIG_PPC32
#define qe_iowrite8(val, addr) out_8(addr, val)
#define qe_iowrite16be(val, addr) out_be16(addr, val)
#define qe_iowrite32be(val, addr) out_be32(addr, val)
#define qe_ioread8(addr) in_8(addr)
#define qe_ioread16be(addr) in_be16(addr)
#define qe_ioread32be(addr) in_be32(addr)
#else
#define qe_iowrite8(val, addr) iowrite8(val, addr)
#define qe_iowrite16be(val, addr) iowrite16be(val, addr)
#define qe_iowrite32be(val, addr) iowrite32be(val, addr)
#define qe_ioread8(addr) ioread8(addr)
#define qe_ioread16be(addr) ioread16be(addr)
#define qe_ioread32be(addr) ioread32be(addr)
#endif
#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr))
#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
#define qe_setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
#define qe_clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr))
#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
#define qe_setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
#define qe_clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr))
#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
#define qe_clrsetbits32(addr, clear, set) \
iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits16(addr, clear, set) \
iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits8(addr, clear, set) \
iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_be32(addr, clear, set) \
qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_be16(addr, clear, set) \
qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
#define qe_clrsetbits_8(addr, clear, set) \
qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
/* Structure that defines QE firmware binary files.
*
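A minimal usage sketch (not part of this diff) for the qe_ioread*/qe_iowrite* helpers added above, which resolve to in_be32()/out_be32() on PPC32 and to ioread32be()/iowrite32be() everywhere else; the register pointer and bit mask are hypothetical.

#include <soc/fsl/qe/qe.h>

#define EXAMPLE_ENABLE	0x00000001	/* hypothetical bit in a hypothetical register */

static void example_set_enable(u32 __iomem *reg, bool on)
{
	u32 val = qe_ioread32be(reg);	/* big-endian read, portable across PPC32/ARM/ARM64 */

	if (on)
		val |= EXAMPLE_ENABLE;
	else
		val &= ~EXAMPLE_ENABLE;
	qe_iowrite32be(val, reg);	/* note the argument order: value first, address second */
}

For the single-bit cases, qe_setbits_be32(reg, EXAMPLE_ENABLE) and qe_clrbits_be32(reg, EXAMPLE_ENABLE) from the macros above do the read-modify-write in one step.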
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
*
* Description:
* QE IC external definitions and structure.
*/
#ifndef _ASM_POWERPC_QE_IC_H
#define _ASM_POWERPC_QE_IC_H
#include <linux/irq.h>
struct device_node;
struct qe_ic;
#define NUM_OF_QE_IC_GROUPS 6
/* Flags when we init the QE IC */
#define QE_IC_SPREADMODE_GRP_W 0x00000001
#define QE_IC_SPREADMODE_GRP_X 0x00000002
#define QE_IC_SPREADMODE_GRP_Y 0x00000004
#define QE_IC_SPREADMODE_GRP_Z 0x00000008
#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
#define QE_IC_LOW_SIGNAL 0x00000100
#define QE_IC_HIGH_SIGNAL 0x00000200
#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
/* QE interrupt sources groups */
enum qe_ic_grp_id {
QE_IC_GRP_W = 0, /* QE interrupt controller group W */
QE_IC_GRP_X, /* QE interrupt controller group X */
QE_IC_GRP_Y, /* QE interrupt controller group Y */
QE_IC_GRP_Z, /* QE interrupt controller group Z */
QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
};
#ifdef CONFIG_QUICC_ENGINE
void qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(struct irq_desc *desc),
void (*high_handler)(struct irq_desc *desc));
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
#else
static inline void qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(struct irq_desc *desc),
void (*high_handler)(struct irq_desc *desc))
{}
static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{ return 0; }
static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{ return 0; }
#endif /* CONFIG_QUICC_ENGINE */
void qe_ic_set_highest_priority(unsigned int virq, int high);
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
}
static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
}
static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
#endif /* _ASM_POWERPC_QE_IC_H */
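For reference, a minimal sketch (not part of this diff) of how platform setup code typically wires up the QE IC using the declarations above; np is a hypothetical device_node for the qe-ic node and the flags are left at zero.

static void __init example_qe_ic_setup(struct device_node *np)
{
	/* Register the low/high cascade handlers declared in this header */
	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
}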
@@ -188,9 +188,9 @@ struct ucc_fast_private {
int stopped_tx; /* Whether channel has been stopped for Tx
(STOP_TX, etc.) */
int stopped_rx; /* Whether channel has been stopped for Rx */
u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
s32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
virtual fifo */
u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
s32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
virtual fifo */
#ifdef STATISTICS
u32 tx_frames; /* Transmitted frames counter. */
@@ -185,7 +185,7 @@ struct ucc_slow_private {
struct ucc_slow_info *us_info;
struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
u32 us_pram_offset;
s32 us_pram_offset;
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
int stopped_tx; /* Whether channel has been stopped for Tx
@@ -194,8 +194,8 @@ struct ucc_slow_private {
struct list_head confQ; /* frames passed to chip waiting for tx */
u32 first_tx_bd_mask; /* mask is used in Tx routine to save status
and length for first BD in a frame */
u32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
u32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
struct qe_bd *confBd; /* next BD for confirm after Tx */
struct qe_bd *tx_bd; /* next BD for new Tx request */
struct qe_bd *rx_bd; /* next BD to collect after Rx */