Commit 77d7676a authored by Rasmus Villemoes, committed by Li Yang

soc: fsl: qe: avoid ppc-specific io accessors

In preparation for allowing QE support to be built for architectures
other than PPC, replace the ppc-specific I/O accessors with the qe_io*
macros. Done via

$ spatch --sp-file io.cocci --in-place drivers/soc/fsl/qe/

where io.cocci is

@@
expression addr, val;
@@
- out_be32(addr, val)
+ qe_iowrite32be(val, addr)

@@
expression addr;
@@
- in_be32(addr)
+ qe_ioread32be(addr)

@@
expression addr, val;
@@
- out_be16(addr, val)
+ qe_iowrite16be(val, addr)

@@
expression addr;
@@
- in_be16(addr)
+ qe_ioread16be(addr)

@@
expression addr, val;
@@
- out_8(addr, val)
+ qe_iowrite8(val, addr)

@@
expression addr;
@@
- in_8(addr)
+ qe_ioread8(addr)

@@
expression addr, clr, set;
@@
- clrsetbits_be32(addr, clr, set)
+ qe_clrsetbits_be32(addr, clr, set)

@@
expression addr, clr, set;
@@
- clrsetbits_be16(addr, clr, set)
+ qe_clrsetbits_be16(addr, clr, set)

@@
expression addr, clr, set;
@@
- clrsetbits_8(addr, clr, set)
+ qe_clrsetbits_8(addr, clr, set)

@@
expression addr, set;
@@
- setbits32(addr, set)
+ qe_setbits_be32(addr, set)

@@
expression addr, set;
@@
- setbits16(addr, set)
+ qe_setbits_be16(addr, set)

@@
expression addr, set;
@@
- setbits8(addr, set)
+ qe_setbits_8(addr, set)

@@
expression addr, clr;
@@
- clrbits32(addr, clr)
+ qe_clrbits_be32(addr, clr)

@@
expression addr, clr;
@@
- clrbits16(addr, clr)
+ qe_clrbits_be16(addr, clr)

@@
expression addr, clr;
@@
- clrbits8(addr, clr)
+ qe_clrbits_8(addr, clr)
Reviewed-by: Timur Tabi <timur@kernel.org>
Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
parent 6ac9b617
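Note (editorial): the diff below only converts call sites. The qe_ioread*/qe_iowrite* and qe_setbits/qe_clrbits/qe_clrsetbits helpers themselves are introduced by a separate patch in this series; the qe_ic.c hunk below gains an #include of <soc/fsl/qe/qe.h>, which is presumably where they are declared. As a rough sketch only, assuming the helpers are plain wrappers around the kernel's generic big-endian MMIO accessors on non-PPC builds, the 32-bit variants can be pictured as:

#include <linux/io.h>

/* Hypothetical sketch, not taken from this commit: assumed shape of the
 * qe_io* helpers on a build that cannot use the ppc in_be32/out_be32. */
static inline u32 qe_ioread32be(void __iomem *addr)
{
        return ioread32be(addr);        /* generic big-endian 32-bit MMIO read */
}

static inline void qe_iowrite32be(u32 val, void __iomem *addr)
{
        iowrite32be(val, addr);         /* generic big-endian 32-bit MMIO write */
}

/* The bit-manipulation helpers compose the two: read, modify, write back. */
static inline void qe_setbits_be32(__be32 __iomem *addr, u32 set)
{
        qe_iowrite32be(qe_ioread32be(addr) | set, addr);
}

static inline void qe_clrbits_be32(__be32 __iomem *addr, u32 clr)
{
        qe_iowrite32be(qe_ioread32be(addr) & ~clr, addr);
}

static inline void qe_clrsetbits_be32(__be32 __iomem *addr, u32 clr, u32 set)
{
        qe_iowrite32be((qe_ioread32be(addr) & ~clr) | set, addr);
}

The 16-bit and 8-bit variants would follow the same pattern, which is why the coccinelle rules above amount to a rename plus argument swap for the write accessors and a plain rename for the reads and the bit helpers.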
@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
                 container_of(mm_gc, struct qe_gpio_chip, mm_gc);
         struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
-        qe_gc->cpdata = in_be32(&regs->cpdata);
+        qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
         qe_gc->saved_regs.cpdata = qe_gc->cpdata;
-        qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
-        qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
-        qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
-        qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
-        qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+        qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
+        qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
+        qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
+        qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
+        qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
 }
 
 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
         struct qe_pio_regs __iomem *regs = mm_gc->regs;
         u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
 
-        return !!(in_be32(&regs->cpdata) & pin_mask);
+        return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
 }
 
 static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
         else
                 qe_gc->cpdata &= ~pin_mask;
 
-        out_be32(&regs->cpdata, qe_gc->cpdata);
+        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
                 }
         }
 
-        out_be32(&regs->cpdata, qe_gc->cpdata);
+        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
@@ -255,11 +255,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
         spin_lock_irqsave(&qe_gc->lock, flags);
 
         if (second_reg) {
-                clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
-                clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+                qe_clrsetbits_be32(&regs->cpdir2, mask2,
+                                   sregs->cpdir2 & mask2);
+                qe_clrsetbits_be32(&regs->cppar2, mask2,
+                                   sregs->cppar2 & mask2);
         } else {
-                clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
-                clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+                qe_clrsetbits_be32(&regs->cpdir1, mask2,
+                                   sregs->cpdir1 & mask2);
+                qe_clrsetbits_be32(&regs->cppar1, mask2,
+                                   sregs->cppar1 & mask2);
         }
 
         if (sregs->cpdata & mask1)
@@ -267,8 +271,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
         else
                 qe_gc->cpdata &= ~mask1;
 
-        out_be32(&regs->cpdata, qe_gc->cpdata);
-        clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+        qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);
 }
...
@@ -112,7 +112,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
         spin_lock_irqsave(&qe_lock, flags);
         if (cmd == QE_RESET) {
-                out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+                qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
         } else {
                 if (cmd == QE_ASSIGN_PAGE) {
                         /* Here device is the SNUM, not sub-block */
@@ -129,15 +129,14 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
                         mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
                 }
 
-                out_be32(&qe_immr->cp.cecdr, cmd_input);
-                out_be32(&qe_immr->cp.cecr,
-                         (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
-                          mcn_protocol << mcn_shift));
+                qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+                qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+                               &qe_immr->cp.cecr);
         }
 
         /* wait for the QE_CR_FLG to clear */
-        ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+        ret = spin_event_timeout((qe_ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
                                  100, 0);
         /* On timeout (e.g. failure), the expression will be false (ret == 0),
            otherwise it will be true (ret == 1). */
         spin_unlock_irqrestore(&qe_lock, flags);
@@ -230,7 +229,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
         tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
                 QE_BRGC_ENABLE | div16;
 
-        out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+        qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
 
         return 0;
 }
@@ -377,9 +376,10 @@ static int qe_sdma_init(void)
                         return -ENOMEM;
         }
 
-        out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
-        out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
-                 (0x1 << QE_SDMR_CEN_SHIFT)));
+        qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+                       &sdma->sdebcr);
+        qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+                       &sdma->sdmr);
 
         return 0;
 }
@@ -417,14 +417,14 @@ static void qe_upload_microcode(const void *base,
                  "uploading microcode '%s'\n", ucode->id);
 
         /* Use auto-increment */
-        out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
-                 QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+        qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+                       &qe_immr->iram.iadd);
 
         for (i = 0; i < be32_to_cpu(ucode->count); i++)
-                out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+                qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
 
         /* Set I-RAM Ready Register */
-        out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+        qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
 }
 
 /*
@@ -509,7 +509,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
          * If the microcode calls for it, split the I-RAM.
          */
         if (!firmware->split)
-                setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+                qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
 
         if (firmware->soc.model)
                 printk(KERN_INFO
@@ -543,11 +543,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
                         u32 trap = be32_to_cpu(ucode->traps[j]);
 
                         if (trap)
-                                out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+                                qe_iowrite32be(trap,
+                                               &qe_immr->rsp[i].tibcr[j]);
                 }
 
                 /* Enable traps */
-                out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+                qe_iowrite32be(be32_to_cpu(ucode->eccr),
+                               &qe_immr->rsp[i].eccr);
         }
 
         qe_firmware_uploaded = 1;
...
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <asm/irq.h>
 #include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
 #include <soc/fsl/qe/qe_ic.h>
 
 #include "qe_ic.h"
@@ -173,13 +174,13 @@ static struct qe_ic_info qe_ic_info[] = {
 
 static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
 {
-        return in_be32(base + (reg >> 2));
+        return qe_ioread32be(base + (reg >> 2));
 }
 
 static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
                                u32 value)
 {
-        out_be32(base + (reg >> 2), value);
+        qe_iowrite32be(value, base + (reg >> 2));
 }
 
 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
...
@@ -57,16 +57,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
         pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
 
         /* Set open drain, if required */
-        tmp_val = in_be32(&par_io->cpodr);
+        tmp_val = qe_ioread32be(&par_io->cpodr);
         if (open_drain)
-                out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+                qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
         else
-                out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+                qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
 
         /* define direction */
         tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-                in_be32(&par_io->cpdir2) :
-                in_be32(&par_io->cpdir1);
+                qe_ioread32be(&par_io->cpdir2) :
+                qe_ioread32be(&par_io->cpdir1);
 
         /* get all bits mask for 2 bit per port */
         pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -78,34 +78,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
         /* clear and set 2 bits mask */
         if (pin > (QE_PIO_PINS / 2) - 1) {
-                out_be32(&par_io->cpdir2,
-                         ~pin_mask2bits & tmp_val);
+                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
                 tmp_val &= ~pin_mask2bits;
-                out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
         } else {
-                out_be32(&par_io->cpdir1,
-                         ~pin_mask2bits & tmp_val);
+                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
                 tmp_val &= ~pin_mask2bits;
-                out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
         }
 
         /* define pin assignment */
         tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-                in_be32(&par_io->cppar2) :
-                in_be32(&par_io->cppar1);
+                qe_ioread32be(&par_io->cppar2) :
+                qe_ioread32be(&par_io->cppar1);
 
         new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
                         (pin % (QE_PIO_PINS / 2) + 1) * 2));
         /* clear and set 2 bits mask */
         if (pin > (QE_PIO_PINS / 2) - 1) {
-                out_be32(&par_io->cppar2,
-                         ~pin_mask2bits & tmp_val);
+                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
                 tmp_val &= ~pin_mask2bits;
-                out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
         } else {
-                out_be32(&par_io->cppar1,
-                         ~pin_mask2bits & tmp_val);
+                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
                 tmp_val &= ~pin_mask2bits;
-                out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
         }
 }
 EXPORT_SYMBOL(__par_io_config_pin);
@@ -133,12 +129,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
         /* calculate pin location */
         pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
 
-        tmp_val = in_be32(&par_io[port].cpdata);
+        tmp_val = qe_ioread32be(&par_io[port].cpdata);
 
         if (val == 0)           /* clear */
-                out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+                qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
         else                    /* set */
-                out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+                qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
 
         return 0;
 }
...
@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
                             &siram[siram_entry_id * 32 + 0x200 + i]);
         }
 
-        setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
-                  SIR_LAST);
-        setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
-                  SIR_LAST);
+        qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+                        SIR_LAST);
+        qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+                        SIR_LAST);
 
         /* Set SIxMR register */
         sixmr = SIMR_SAD(siram_entry_id);
...
@@ -35,8 +35,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
                 return -EINVAL;
 
         spin_lock_irqsave(&cmxgcr_lock, flags);
-        clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
-                        ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+        qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+                           ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
         spin_unlock_irqrestore(&cmxgcr_lock, flags);
 
         return 0;
@@ -80,8 +80,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
                 return -EINVAL;
         }
 
-        clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
-                     UCC_GUEMR_SET_RESERVED3 | speed);
+        qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+                        UCC_GUEMR_SET_RESERVED3 | speed);
 
         return 0;
 }
@@ -109,9 +109,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
         get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
         if (set)
-                setbits32(cmxucr, mask << shift);
+                qe_setbits_be32(cmxucr, mask << shift);
         else
-                clrbits32(cmxucr, mask << shift);
+                qe_clrbits_be32(cmxucr, mask << shift);
 
         return 0;
 }
@@ -207,8 +207,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
         if (mode == COMM_DIR_RX)
                 shift += 4;
 
-        clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-                        clock_bits << shift);
+        qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+                           clock_bits << shift);
 
         return 0;
 }
...
@@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
         printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
 
         printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+               &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
         printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+               &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
         printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+               &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
         printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+               &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
         printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+               &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
         printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+               &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
         printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
-               &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+               &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
         printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+               &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
         printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+               &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
         printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+               &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
         printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+               &uccf->uf_regs->urfset,
+               qe_ioread16be(&uccf->uf_regs->urfset));
         printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+               &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
         printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+               &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
         printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+               &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
         printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+               &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
         printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+               &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
         printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+               &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
         printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-               &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+               &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
 }
 EXPORT_SYMBOL(ucc_fast_dump_regs);
@@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
 
 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
 {
-        out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+        qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
 }
 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
@@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
         uf_regs = uccf->uf_regs;
 
         /* Enable reception and/or transmission on this UCC. */
-        gumr = in_be32(&uf_regs->gumr);
+        gumr = qe_ioread32be(&uf_regs->gumr);
         if (mode & COMM_DIR_TX) {
                 gumr |= UCC_FAST_GUMR_ENT;
                 uccf->enabled_tx = 1;
@@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
                 gumr |= UCC_FAST_GUMR_ENR;
                 uccf->enabled_rx = 1;
         }
-        out_be32(&uf_regs->gumr, gumr);
+        qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_enable);
@@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
         uf_regs = uccf->uf_regs;
 
         /* Disable reception and/or transmission on this UCC. */
-        gumr = in_be32(&uf_regs->gumr);
+        gumr = qe_ioread32be(&uf_regs->gumr);
         if (mode & COMM_DIR_TX) {
                 gumr &= ~UCC_FAST_GUMR_ENT;
                 uccf->enabled_tx = 0;
@@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
                 gumr &= ~UCC_FAST_GUMR_ENR;
                 uccf->enabled_rx = 0;
         }
-        out_be32(&uf_regs->gumr, gumr);
+        qe_iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_disable);
@@ -259,7 +260,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         gumr |= uf_info->tenc;
         gumr |= uf_info->tcrc;
         gumr |= uf_info->mode;
-        out_be32(&uf_regs->gumr, gumr);
+        qe_iowrite32be(gumr, &uf_regs->gumr);
 
         /* Allocate memory for Tx Virtual Fifo */
         uccf->ucc_fast_tx_virtual_fifo_base_offset =
@@ -286,15 +287,17 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         }
 
         /* Set Virtual Fifo registers */
-        out_be16(&uf_regs->urfs, uf_info->urfs);
-        out_be16(&uf_regs->urfet, uf_info->urfet);
-        out_be16(&uf_regs->urfset, uf_info->urfset);
-        out_be16(&uf_regs->utfs, uf_info->utfs);
-        out_be16(&uf_regs->utfet, uf_info->utfet);
-        out_be16(&uf_regs->utftt, uf_info->utftt);
+        qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
+        qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
+        qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
+        qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
+        qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
+        qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
         /* utfb, urfb are offsets from MURAM base */
-        out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
-        out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+        qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+                       &uf_regs->utfb);
+        qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+                       &uf_regs->urfb);
 
         /* Mux clocking */
         /* Grant Support */
@@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         }
 
         /* Set interrupt mask register at UCC level. */
-        out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+        qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 
         /* First, clear anything pending at UCC level,
          * otherwise, old garbage may come through
          * as soon as the dam is opened. */
 
         /* Writing '1' clears */
-        out_be32(&uf_regs->ucce, 0xffffffff);
+        qe_iowrite32be(0xffffffff, &uf_regs->ucce);
 
         *uccf_ret = uccf;
         return 0;
...
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
         us_regs = uccs->us_regs;
 
         /* Enable reception and/or transmission on this UCC. */
-        gumr_l = in_be32(&us_regs->gumr_l);
+        gumr_l = qe_ioread32be(&us_regs->gumr_l);
         if (mode & COMM_DIR_TX) {
                 gumr_l |= UCC_SLOW_GUMR_L_ENT;
                 uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
                 gumr_l |= UCC_SLOW_GUMR_L_ENR;
                 uccs->enabled_rx = 1;
         }
-        out_be32(&us_regs->gumr_l, gumr_l);
+        qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_enable);
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
         us_regs = uccs->us_regs;
 
         /* Disable reception and/or transmission on this UCC. */
-        gumr_l = in_be32(&us_regs->gumr_l);
+        gumr_l = qe_ioread32be(&us_regs->gumr_l);
         if (mode & COMM_DIR_TX) {
                 gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
                 uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
                 gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
                 uccs->enabled_rx = 0;
         }
-        out_be32(&us_regs->gumr_l, gumr_l);
+        qe_iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_disable);
@@ -198,7 +198,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 return ret;
         }
 
-        out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+        qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
 
         INIT_LIST_HEAD(&uccs->confQ);
@@ -228,27 +228,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
         for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
                 /* clear bd buffer */
-                out_be32(&bd->buf, 0);
+                qe_iowrite32be(0, &bd->buf);
                 /* set bd status and length */
-                out_be32((u32 *) bd, 0);
+                qe_iowrite32be(0, (u32 *)bd);
                 bd++;
         }
         /* for last BD set Wrap bit */
-        out_be32(&bd->buf, 0);
-        out_be32((u32 *) bd, cpu_to_be32(T_W));
+        qe_iowrite32be(0, &bd->buf);
+        qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
 
         /* Init Rx bds */
         bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
         for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
                 /* set bd status and length */
-                out_be32((u32 *)bd, 0);
+                qe_iowrite32be(0, (u32 *)bd);
                 /* clear bd buffer */
-                out_be32(&bd->buf, 0);
+                qe_iowrite32be(0, &bd->buf);
                 bd++;
         }
         /* for last BD set Wrap bit */
-        out_be32((u32 *)bd, cpu_to_be32(R_W));
-        out_be32(&bd->buf, 0);
+        qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+        qe_iowrite32be(0, &bd->buf);
 
         /* Set GUMR (For more details see the hardware spec.). */
         /* gumr_h */
@@ -269,7 +269,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 gumr |= UCC_SLOW_GUMR_H_TXSY;
         if (us_info->rtsm)
                 gumr |= UCC_SLOW_GUMR_H_RTSM;
-        out_be32(&us_regs->gumr_h, gumr);
+        qe_iowrite32be(gumr, &us_regs->gumr_h);
 
         /* gumr_l */
         gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -282,7 +282,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 gumr |= UCC_SLOW_GUMR_L_TINV;
         if (us_info->tend)
                 gumr |= UCC_SLOW_GUMR_L_TEND;
-        out_be32(&us_regs->gumr_l, gumr);
+        qe_iowrite32be(gumr, &us_regs->gumr_l);
 
         /* Function code registers */
@@ -292,8 +292,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         uccs->us_pram->rbmr = UCC_BMR_BO_BE;
 
         /* rbase, tbase are offsets from MURAM base */
-        out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
-        out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+        qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+        qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
 
         /* Mux clocking */
         /* Grant Support */
@@ -323,14 +323,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         }
 
         /* Set interrupt mask register at UCC level. */
-        out_be16(&us_regs->uccm, us_info->uccm_mask);
+        qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
 
         /* First, clear anything pending at UCC level,
          * otherwise, old garbage may come through
          * as soon as the dam is opened. */
 
         /* Writing '1' clears */
-        out_be16(&us_regs->ucce, 0xffff);
+        qe_iowrite16be(0xffff, &us_regs->ucce);
 
         /* Issue QE Init command */
         if (us_info->init_tx && us_info->init_rx)
...
@@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate)
         spin_lock_irqsave(&cmxgcr_lock, flags);
 
-        clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+        qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
 
         spin_unlock_irqrestore(&cmxgcr_lock, flags);
...