Commit 11260373 authored by Olof Johansson's avatar Olof Johansson

Merge tag 'soc-fsl-next-v5.3' of...

Merge tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.3

DPAA2 Console driver
- Add driver to export two char devices to dump logs for MC and
  AIOP

DPAA2 DPIO driver
- Add support for memory backed QBMan portals
- Increase the timeout period to prevent false error
- Add APIs to retrieve QBMan portal probing status

DPAA Qman driver
- Only make liodn fixup on powerpc SoCs with PAMU iommu

* tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
  soc: fsl: qbman_portals: add APIs to retrieve the probing status
  soc: fsl: qman: fixup liodns only on ppc targets
  soc: fsl: dpio: Add support for memory backed QBMan portals
  bus: mc-bus: Add support for mapping shareable portals
  soc: fsl: dpio: Increase timeout for QBMan Management Commands
  soc: fsl: add DPAA2 console support
  Documentation: DT: Add entry for DPAA2 console
  soc: fsl: guts: Add definition for LX2160A
Signed-off-by: default avatarOlof Johansson <olof@lixom.net>
parents 9e0babf2 5d1d046e
DPAA2 console support
Required properties:
- compatible
Value type: <string>
Definition: Must be "fsl,dpaa2-console".
- reg
Value type: <prop-encoded-array>
Definition: A standard property. Specifies the region where the MCFBA
(MC firmware base address) register can be found.
...@@ -6416,6 +6416,7 @@ M: Li Yang <leoyang.li@nxp.com> ...@@ -6416,6 +6416,7 @@ M: Li Yang <leoyang.li@nxp.com>
L: linuxppc-dev@lists.ozlabs.org L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org L: linux-arm-kernel@lists.infradead.org
S: Maintained S: Maintained
F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
F: Documentation/devicetree/bindings/soc/fsl/ F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/ F: drivers/soc/fsl/
F: include/linux/fsl/ F: include/linux/fsl/
......
...@@ -443,11 +443,31 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io, ...@@ -443,11 +443,31 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct fsl_mc_command cmd = { 0 }; struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj_region *cmd_params; struct dprc_cmd_get_obj_region *cmd_params;
struct dprc_rsp_get_obj_region *rsp_params; struct dprc_rsp_get_obj_region *rsp_params;
u16 major_ver, minor_ver;
int err; int err;
/* prepare command */ /* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, err = dprc_get_api_version(mc_io, 0,
cmd_flags, token); &major_ver,
&minor_ver);
if (err)
return err;
/**
* MC API version 6.3 introduced a new field to the region
* descriptor: base_address. If the older API is in use then the base
* address is set to zero to indicate it needs to be obtained elsewhere
* (typically the device tree).
*/
if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
cmd.header =
mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
cmd_flags, token);
else
cmd.header =
mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
cmd_flags, token);
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params; cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id); cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index; cmd_params->region_index = region_index;
...@@ -461,8 +481,12 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io, ...@@ -461,8 +481,12 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
/* retrieve response parameters */ /* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params; rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
region_desc->base_offset = le64_to_cpu(rsp_params->base_addr); region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
region_desc->size = le32_to_cpu(rsp_params->size); region_desc->size = le32_to_cpu(rsp_params->size);
if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
else
region_desc->base_address = 0;
return 0; return 0;
} }
......
...@@ -487,10 +487,19 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, ...@@ -487,10 +487,19 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
"dprc_get_obj_region() failed: %d\n", error); "dprc_get_obj_region() failed: %d\n", error);
goto error_cleanup_regions; goto error_cleanup_regions;
} }
/*
error = translate_mc_addr(mc_dev, mc_region_type, * Older MC only returned region offset and no base address
* If base address is in the region_desc use it otherwise
* revert to old mechanism
*/
if (region_desc.base_address)
regions[i].start = region_desc.base_address +
region_desc.base_offset;
else
error = translate_mc_addr(mc_dev, mc_region_type,
region_desc.base_offset, region_desc.base_offset,
&regions[i].start); &regions[i].start);
if (error < 0) { if (error < 0) {
dev_err(parent_dev, dev_err(parent_dev,
"Invalid MC offset: %#x (for %s.%d\'s region %d)\n", "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
...@@ -504,6 +513,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, ...@@ -504,6 +513,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].flags = IORESOURCE_IO; regions[i].flags = IORESOURCE_IO;
if (region_desc.flags & DPRC_REGION_CACHEABLE) if (region_desc.flags & DPRC_REGION_CACHEABLE)
regions[i].flags |= IORESOURCE_CACHEABLE; regions[i].flags |= IORESOURCE_CACHEABLE;
if (region_desc.flags & DPRC_REGION_SHAREABLE)
regions[i].flags |= IORESOURCE_MEM;
} }
mc_dev->regions = regions; mc_dev->regions = regions;
......
...@@ -79,9 +79,11 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, ...@@ -79,9 +79,11 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command versioning */ /* DPRC command versioning */
#define DPRC_CMD_BASE_VERSION 1 #define DPRC_CMD_BASE_VERSION 1
#define DPRC_CMD_2ND_VERSION 2
#define DPRC_CMD_ID_OFFSET 4 #define DPRC_CMD_ID_OFFSET 4
#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) #define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
/* DPRC command IDs */ /* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800) #define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
...@@ -100,6 +102,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, ...@@ -100,6 +102,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159) #define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A) #define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E) #define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F) #define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
struct dprc_cmd_open { struct dprc_cmd_open {
...@@ -199,9 +202,16 @@ struct dprc_rsp_get_obj_region { ...@@ -199,9 +202,16 @@ struct dprc_rsp_get_obj_region {
/* response word 0 */ /* response word 0 */
__le64 pad; __le64 pad;
/* response word 1 */ /* response word 1 */
__le64 base_addr; __le64 base_offset;
/* response word 2 */ /* response word 2 */
__le32 size; __le32 size;
__le32 pad2;
/* response word 3 */
__le32 flags;
__le32 pad3;
/* response word 4 */
/* base_addr may be zero if older MC firmware is used */
__le64 base_addr;
}; };
struct dprc_cmd_set_obj_irq { struct dprc_cmd_set_obj_irq {
...@@ -334,6 +344,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ...@@ -334,6 +344,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
/* Region flags */ /* Region flags */
/* Cacheable - Indicates that region should be mapped as cacheable */ /* Cacheable - Indicates that region should be mapped as cacheable */
#define DPRC_REGION_CACHEABLE 0x00000001 #define DPRC_REGION_CACHEABLE 0x00000001
#define DPRC_REGION_SHAREABLE 0x00000002
/** /**
* enum dprc_region_type - Region type * enum dprc_region_type - Region type
...@@ -342,7 +353,8 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ...@@ -342,7 +353,8 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
*/ */
enum dprc_region_type { enum dprc_region_type {
DPRC_REGION_TYPE_MC_PORTAL, DPRC_REGION_TYPE_MC_PORTAL,
DPRC_REGION_TYPE_QBMAN_PORTAL DPRC_REGION_TYPE_QBMAN_PORTAL,
DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
}; };
/** /**
...@@ -360,6 +372,7 @@ struct dprc_region_desc { ...@@ -360,6 +372,7 @@ struct dprc_region_desc {
u32 size; u32 size;
u32 flags; u32 flags;
enum dprc_region_type type; enum dprc_region_type type;
u64 base_address;
}; };
int dprc_get_obj_region(struct fsl_mc_io *mc_io, int dprc_get_obj_region(struct fsl_mc_io *mc_io,
......
...@@ -30,4 +30,14 @@ config FSL_MC_DPIO ...@@ -30,4 +30,14 @@ config FSL_MC_DPIO
other DPAA2 objects. This driver does not expose the DPIO other DPAA2 objects. This driver does not expose the DPIO
objects individually, but groups them under a service layer objects individually, but groups them under a service layer
API. API.
config DPAA2_CONSOLE
tristate "QorIQ DPAA2 console driver"
depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
default y
help
Console driver for DPAA2 platforms. Exports 2 char devices,
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
which can be used to dump the Management Complex and AIOP
firmware logs.
endmenu endmenu
...@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/ ...@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/ obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_GUTS) += guts.o obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/ obj-$(CONFIG_FSL_MC_DPIO) += dpio/
obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Freescale DPAA2 Platforms Console Driver
*
* Copyright 2015-2016 Freescale Semiconductor Inc.
* Copyright 2018 NXP
*/
#define pr_fmt(fmt) "dpaa2-console: " fmt
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/io.h>
/* MC firmware base low/high registers indexes */
#define MCFBALR_OFFSET 0
#define MCFBAHR_OFFSET 1
/* Bit masks used to get the most/least significant part of the MC base addr */
#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
#define MC_FW_ADDR_MASK_LOW 0xE0000000
#define MC_BUFFER_OFFSET 0x01000000
#define MC_BUFFER_SIZE (1024 * 1024 * 16)
#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
#define AIOP_BUFFER_OFFSET 0x06000000
#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
#define AIOP_OFFSET_DELTA 0
#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
/* MC and AIOP Magic words */
#define MAGIC_MC 0x4d430100
#define MAGIC_AIOP 0x41494F50
/*
 * Log header placed by the MC/AIOP firmware at the start of its log area.
 * All fields are little-endian as written by the firmware; read them with
 * readl() and convert as needed.
 */
struct log_header {
	__le32 magic_word;	/* expected to be MAGIC_MC or MAGIC_AIOP */
	char reserved[4];
	__le32 buf_start;	/* firmware-relative offset of the log buffer */
	__le32 buf_length;	/* size of the circular log buffer in bytes */
	__le32 last_byte;	/* offset of the last byte written; the MSB
				 * (LOG_HEADER_FLAG_BUFFER_WRAPAROUND) is set
				 * once the buffer has wrapped at least once
				 */
};
/*
 * Per-open-file state describing one mapped console log buffer.
 * All pointers are virtual addresses inside the ioremap()ed window.
 */
struct console_data {
	void __iomem *map_addr;		/* base of the ioremap()ed window */
	struct log_header __iomem *hdr;	/* firmware log header (at map_addr) */
	void __iomem *start_addr;	/* start of the circular log buffer */
	void __iomem *end_addr;		/* one past the end of the buffer */
	void __iomem *end_of_data;	/* current end of valid log data */
	void __iomem *cur_ptr;		/* next byte to hand to the reader */
};
/*
 * Physical resource of the MCFBA (MC firmware base address) register block,
 * filled in once at probe time from the device tree and shared by all opens.
 */
static struct resource mc_base_addr;
/*
 * Re-read the firmware's last_byte marker and refresh cd->end_of_data so
 * readers see log data appended since the device was opened.
 */
static inline void adjust_end(struct console_data *cd)
{
	u32 marker;

	marker = readl(&cd->hdr->last_byte);
	cd->end_of_data = cd->start_addr + LAST_BYTE(marker);
}
/*
 * Read the MC firmware physical base address from the MCFBAHR/MCFBALR
 * registers described by mc_base_addr.
 *
 * Returns the 64-bit base address, or 0 if the register block could not
 * be mapped (0 doubles as the error sentinel for callers).
 */
static u64 get_mc_fw_base_address(void)
{
	u64 mcfwbase = 0ULL;
	u32 __iomem *mcfbaregs;

	mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
	if (!mcfbaregs) {
		/* Fixed typo in the error message ("Firmaware") */
		pr_err("could not map MC Firmware Base registers\n");
		return 0;
	}

	/* High bits live in MCFBAHR, low bits in MCFBALR */
	mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
		   MC_FW_ADDR_MASK_HIGH;
	mcfwbase <<= 32;
	mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
	iounmap(mcfbaregs);

	pr_debug("MC base address at 0x%016llx\n", mcfwbase);
	return mcfwbase;
}
/*
 * Number of unread log bytes between cur_ptr and end_of_data, accounting
 * for the circular buffer having wrapped past end_addr.
 */
static ssize_t dpaa2_console_size(struct console_data *cd)
{
	if (cd->cur_ptr <= cd->end_of_data)
		return cd->end_of_data - cd->cur_ptr;

	/* Data wraps: tail up to end_addr plus head from start_addr */
	return (cd->end_addr - cd->cur_ptr) +
	       (cd->end_of_data - cd->start_addr);
}
/*
 * Shared open() implementation for the MC and AIOP console devices.
 *
 * Maps a @size byte window at @offset past the MC firmware base address,
 * validates the log header's magic word against @expected_magic, and fills
 * in a struct console_data describing the circular log buffer.
 * @offset_delta compensates for the difference between the header's
 * firmware-relative buf_start and the start of the mapped window
 * (MC_OFFSET_DELTA for MC, 0 for AIOP).
 *
 * Returns 0 on success or a negative errno.
 */
static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
				      u64 offset, u64 size,
				      u32 expected_magic,
				      u32 offset_delta)
{
	u32 read_magic, wrapped, last_byte, buf_start, buf_length;
	struct console_data *cd;
	u64 base_addr;
	int err;

	cd = kmalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	/* Locate the MC firmware in physical memory via the MCFBA registers */
	base_addr = get_mc_fw_base_address();
	if (!base_addr) {
		err = -EIO;
		goto err_fwba;
	}

	cd->map_addr = ioremap(base_addr + offset, size);
	if (!cd->map_addr) {
		pr_err("cannot map console log memory\n");
		err = -EIO;
		goto err_ioremap;
	}

	/* The log header sits at the very start of the mapped region */
	cd->hdr = (struct log_header __iomem *)cd->map_addr;
	read_magic = readl(&cd->hdr->magic_word);
	last_byte = readl(&cd->hdr->last_byte);
	buf_start = readl(&cd->hdr->buf_start);
	buf_length = readl(&cd->hdr->buf_length);

	if (read_magic != expected_magic) {
		pr_warn("expected = %08x, read = %08x\n",
			expected_magic, read_magic);
		err = -EIO;
		goto err_magic;
	}

	/* Translate firmware-relative offsets into mapped virtual addresses */
	cd->start_addr = cd->map_addr + buf_start - offset_delta;
	cd->end_addr = cd->start_addr + buf_length;

	wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;

	adjust_end(cd);
	/* After a wraparound, the oldest data starts just past end_of_data */
	if (wrapped && cd->end_of_data != cd->end_addr)
		cd->cur_ptr = cd->end_of_data + 1;
	else
		cd->cur_ptr = cd->start_addr;

	fp->private_data = cd;

	return 0;

err_magic:
	iounmap(cd->map_addr);

err_ioremap:
err_fwba:
	kfree(cd);

	return err;
}
/* open() for /dev/dpaa2_mc_console: map the MC firmware log region */
static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
{
	return dpaa2_generic_console_open(node, fp, MC_BUFFER_OFFSET,
					  MC_BUFFER_SIZE, MAGIC_MC,
					  MC_OFFSET_DELTA);
}
/* open() for /dev/dpaa2_aiop_console: map the AIOP firmware log region */
static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
{
	return dpaa2_generic_console_open(node, fp, AIOP_BUFFER_OFFSET,
					  AIOP_BUFFER_SIZE, MAGIC_AIOP,
					  AIOP_OFFSET_DELTA);
}
/* release(): undo the mapping and state allocated by the generic open */
static int dpaa2_console_close(struct inode *node, struct file *fp)
{
	struct console_data *console = fp->private_data;

	iounmap(console->map_addr);
	kfree(console);

	return 0;
}
/*
 * Copy up to @count bytes of console log data to userspace, handling the
 * circular-buffer wraparound at end_addr via a kernel bounce buffer
 * (memcpy_fromio() cannot target userspace directly).
 *
 * Returns the number of bytes written, 0 at end of log, or -ENOMEM/-EFAULT.
 *
 * NOTE(review): @bytes is computed from dpaa2_console_size() *before*
 * adjust_end() refreshes end_of_data, so data appended between the two
 * calls is only picked up on the next read — presumably intentional;
 * confirm against firmware behavior.
 */
static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
				  size_t count, loff_t *f_pos)
{
	struct console_data *cd = fp->private_data;
	size_t bytes = dpaa2_console_size(cd);
	size_t bytes_end = cd->end_addr - cd->cur_ptr;
	size_t written = 0;
	void *kbuf;
	int err;

	/* Check if we need to adjust the end of data addr */
	adjust_end(cd);

	if (cd->end_of_data == cd->cur_ptr)
		return 0;

	if (count < bytes)
		bytes = count;

	kbuf = kmalloc(bytes, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (bytes > bytes_end) {
		/* Request spans the wrap point: copy the tail first */
		memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
		if (copy_to_user(buf, kbuf, bytes_end)) {
			err = -EFAULT;
			goto err_free_buf;
		}
		buf += bytes_end;
		cd->cur_ptr = cd->start_addr;
		bytes -= bytes_end;
		written += bytes_end;
	}

	memcpy_fromio(kbuf, cd->cur_ptr, bytes);
	if (copy_to_user(buf, kbuf, bytes)) {
		err = -EFAULT;
		goto err_free_buf;
	}
	cd->cur_ptr += bytes;
	written += bytes;

	/* Bug fix: the bounce buffer was leaked on the success path */
	kfree(kbuf);

	return written;

err_free_buf:
	kfree(kbuf);

	return err;
}
/* File operations for the MC console char device */
static const struct file_operations dpaa2_mc_console_fops = {
	.owner          = THIS_MODULE,
	.open           = dpaa2_mc_console_open,
	.release        = dpaa2_console_close,
	.read           = dpaa2_console_read,
};

/* /dev/dpaa2_mc_console, minor number assigned dynamically */
static struct miscdevice dpaa2_mc_console_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "dpaa2_mc_console",
	.fops = &dpaa2_mc_console_fops
};

/* File operations for the AIOP console char device */
static const struct file_operations dpaa2_aiop_console_fops = {
	.owner          = THIS_MODULE,
	.open           = dpaa2_aiop_console_open,
	.release        = dpaa2_console_close,
	.read           = dpaa2_console_read,
};

/* /dev/dpaa2_aiop_console, minor number assigned dynamically */
static struct miscdevice dpaa2_aiop_console_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "dpaa2_aiop_console",
	.fops = &dpaa2_aiop_console_fops
};
/*
 * Probe: resolve the MCFBA register resource from the device tree, then
 * register both misc devices.  Returns 0 on success or a negative errno.
 */
static int dpaa2_console_probe(struct platform_device *pdev)
{
	int err;

	err = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
	if (err < 0) {
		pr_err("of_address_to_resource() failed for %pOF with %d\n",
		       pdev->dev.of_node, err);
		return err;
	}

	err = misc_register(&dpaa2_mc_console_dev);
	if (err) {
		pr_err("cannot register device %s\n",
		       dpaa2_mc_console_dev.name);
		return err;
	}

	err = misc_register(&dpaa2_aiop_console_dev);
	if (err) {
		pr_err("cannot register device %s\n",
		       dpaa2_aiop_console_dev.name);
		/* Roll back the MC device registered above */
		misc_deregister(&dpaa2_mc_console_dev);
		return err;
	}

	return 0;
}
/* Remove: unregister both console char devices (order is immaterial) */
static int dpaa2_console_remove(struct platform_device *pdev)
{
	misc_deregister(&dpaa2_aiop_console_dev);
	misc_deregister(&dpaa2_mc_console_dev);

	return 0;
}
/* Bound via the "fsl,dpaa2-console" device-tree compatible string */
static const struct of_device_id dpaa2_console_match_table[] = {
	{ .compatible = "fsl,dpaa2-console",},
	{},
};

MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);

static struct platform_driver dpaa2_console_driver = {
	.driver = {
		   .name = "dpaa2-console",
		   .pm = NULL,	/* no power-management callbacks */
		   .of_match_table = dpaa2_console_match_table,
		   },
	.probe = dpaa2_console_probe,
	.remove = dpaa2_console_remove,
};
/* Registers the driver and generates module init/exit boilerplate */
module_platform_driver(dpaa2_console_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
MODULE_DESCRIPTION("DPAA2 console driver");
...@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) ...@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.cpu); desc.cpu);
} }
/* if (dpio_dev->obj_desc.region_count < 3) {
* Set the CENA regs to be the cache inhibited area of the portal to /* No support for DDR backed portals, use classic mapping */
* avoid coherency issues if a user migrates to another core. /*
*/ * Set the CENA regs to be the cache inhibited area of the
desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, * portal to avoid coherency issues if a user migrates to
resource_size(&dpio_dev->regions[1]), * another core.
MEMREMAP_WC); */
desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
resource_size(&dpio_dev->regions[1]),
MEMREMAP_WC);
} else {
desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
resource_size(&dpio_dev->regions[2]),
MEMREMAP_WB);
}
if (IS_ERR(desc.regs_cena)) { if (IS_ERR(desc.regs_cena)) {
dev_err(dev, "devm_memremap failed\n"); dev_err(dev, "devm_memremap failed\n");
err = PTR_ERR(desc.regs_cena); err = PTR_ERR(desc.regs_cena);
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#define QMAN_REV_4000 0x04000000 #define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000 #define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001 #define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000 #define QMAN_REV_MASK 0xffff0000
/* All QBMan command and result structures use this "valid bit" encoding */ /* All QBMan command and result structures use this "valid bit" encoding */
...@@ -25,10 +27,17 @@ ...@@ -25,10 +27,17 @@
#define QBMAN_WQCHAN_CONFIGURE 0x46 #define QBMAN_WQCHAN_CONFIGURE 0x46
/* CINH register offsets */ /* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQAR 0x8c0 #define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00 #define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DCAP 0xac0 #define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00 #define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0 #define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00 #define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40 #define QBMAN_CINH_SWP_IER 0xe40
...@@ -43,6 +52,13 @@ ...@@ -43,6 +52,13 @@
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780 #define QBMAN_CENA_SWP_VDQCR 0x780
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6) #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
...@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset) ...@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
#define SWP_CFG_DQRR_MF_SHIFT 20 #define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16 #define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14 #define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12 #define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10 #define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8 #define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5 #define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4 #define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3 #define SWP_CFG_SE_SHIFT 3
...@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm, ...@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
ep << SWP_CFG_EP_SHIFT); ep << SWP_CFG_EP_SHIFT);
} }
#define QMAN_RT_MODE 0x00000100
/** /**
* qbman_swp_init() - Create a functional object representing the given * qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor. * QBMan portal descriptor.
...@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ...@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
p->mr.valid_bit = QB_VALID_BIT;
atomic_set(&p->vdq.available, 1); atomic_set(&p->vdq.available, 1);
p->vdq.valid_bit = QB_VALID_BIT; p->vdq.valid_bit = QB_VALID_BIT;
...@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ...@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar; p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar; p->addr_cinh = d->cinh_bar;
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
memset(p->addr_cena, 0, 64 * 1024);
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */ 1, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */ 0, /* EQCR_CI stashing threshold */
...@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ...@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
1, /* dequeue stashing priority == TRUE */ 1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */ 0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */ 0); /* EQCR_CI stashing priority == FALSE */
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
...@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ...@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
return NULL; return NULL;
} }
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
}
/* /*
* SDQCR needs to be initialized to 0 when no channels are * SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an * being dequeued from or else the QMan HW will indicate an
...@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) ...@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
*/ */
void *qbman_swp_mc_start(struct qbman_swp *p) void *qbman_swp_mc_start(struct qbman_swp *p)
{ {
return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
else
return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
} }
/* /*
...@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb) ...@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{ {
u8 *v = cmd; u8 *v = cmd;
dma_wmb(); if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
*v = cmd_verb | p->mc.valid_bit; dma_wmb();
*v = cmd_verb | p->mc.valid_bit;
} else {
*v = cmd_verb | p->mc.valid_bit;
dma_wmb();
qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
}
} }
/* /*
...@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p) ...@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
{ {
u32 *ret, verb; u32 *ret, verb;
ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
/* Remove the valid-bit - command completed if the rest
* is non-zero.
*/
verb = ret[0] & ~QB_VALID_BIT;
if (!verb)
return NULL;
p->mc.valid_bit ^= QB_VALID_BIT;
} else {
ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
/* Command completed if the valid bit is toggled */
if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
return NULL;
/* Command completed if the rest is non-zero */
verb = ret[0] & ~QB_VALID_BIT;
if (!verb)
return NULL;
p->mr.valid_bit ^= QB_VALID_BIT;
}
/* Remove the valid-bit - command completed if the rest is non-zero */
verb = ret[0] & ~QB_VALID_BIT;
if (!verb)
return NULL;
p->mc.valid_bit ^= QB_VALID_BIT;
return ret; return ret;
} }
...@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid, ...@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
#define EQAR_VB(eqar) ((eqar) & 0x80) #define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
u8 idx)
{
if (idx < 16)
qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
QMAN_RT_MODE);
else
qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
(idx - 16) * 4,
QMAN_RT_MODE);
}
/** /**
* qbman_swp_enqueue() - Issue an enqueue command * qbman_swp_enqueue() - Issue an enqueue command
* @s: the software portal used for enqueue * @s: the software portal used for enqueue
...@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, ...@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
memcpy(&p->dca, &d->dca, 31); memcpy(&p->dca, &d->dca, 31);
memcpy(&p->fd, fd, sizeof(*fd)); memcpy(&p->fd, fd, sizeof(*fd));
/* Set the verb byte, have to substitute in the valid-bit */ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
dma_wmb(); /* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | EQAR_VB(eqar); dma_wmb();
p->verb = d->verb | EQAR_VB(eqar);
} else {
p->verb = d->verb | EQAR_VB(eqar);
dma_wmb();
qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
}
return 0; return 0;
} }
...@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) ...@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
return -EBUSY; return -EBUSY;
} }
s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p->numf = d->numf; p->numf = d->numf;
p->tok = QMAN_DQ_TOKEN_VALID; p->tok = QMAN_DQ_TOKEN_VALID;
p->dq_src = d->dq_src; p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr; p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt; p->rsp_addr_virt = d->rsp_addr_virt;
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
p->verb = d->verb | s->vdq.valid_bit; dma_wmb();
s->vdq.valid_bit ^= QB_VALID_BIT; /* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
} else {
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
}
return 0; return 0;
} }
...@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) ...@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
} }
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb; verb = p->dq.verb;
/* /*
...@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, ...@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY; return -EBUSY;
/* Start the release command */ /* Start the release command */
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */ /* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++) for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]); p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid; p->bpid = d->bpid;
/* if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
* Set the verb byte, have to substitute in the valid-bit and the number /*
* of buffers. * Set the verb byte, have to substitute in the valid-bit
*/ * and the number of buffers.
dma_wmb(); */
p->verb = d->verb | RAR_VB(rar) | num_buffers; dma_wmb();
p->verb = d->verb | RAR_VB(rar) | num_buffers;
} else {
p->verb = d->verb | RAR_VB(rar) | num_buffers;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
RAR_IDX(rar) * 4, QMAN_RT_MODE);
}
return 0; return 0;
} }
......
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* /*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc. * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP * Copyright 2016-2019 NXP
* *
*/ */
#ifndef __FSL_QBMAN_PORTAL_H #ifndef __FSL_QBMAN_PORTAL_H
...@@ -110,6 +110,11 @@ struct qbman_swp { ...@@ -110,6 +110,11 @@ struct qbman_swp {
u32 valid_bit; /* 0x00 or 0x80 */ u32 valid_bit; /* 0x00 or 0x80 */
} mc; } mc;
/* Management response */
struct {
u32 valid_bit; /* 0x00 or 0x80 */
} mr;
/* Push dequeues */ /* Push dequeues */
u32 sdq; u32 sdq;
...@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, ...@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
u8 cmd_verb) u8 cmd_verb)
{ {
int loopvar = 1000; int loopvar = 2000;
qbman_swp_mc_submit(swp, cmd, cmd_verb); qbman_swp_mc_submit(swp, cmd, cmd_verb);
......
...@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = { ...@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
.svr = 0x87000000, .svr = 0x87000000,
.mask = 0xfff70000, .mask = 0xfff70000,
}, },
/* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
{ .die = "LX2160A",
.svr = 0x87360000,
.mask = 0xff3f0000,
},
{ }, { },
}; };
...@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = { ...@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1088a-dcfg", }, { .compatible = "fsl,ls1088a-dcfg", },
{ .compatible = "fsl,ls1012a-dcfg", }, { .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", }, { .compatible = "fsl,ls1046a-dcfg", },
{ .compatible = "fsl,lx2160a-dcfg", },
{} {}
}; };
MODULE_DEVICE_TABLE(of, fsl_guts_of_match); MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
static struct bman_portal *affine_bportals[NR_CPUS]; static struct bman_portal *affine_bportals[NR_CPUS];
static struct cpumask portal_cpus; static struct cpumask portal_cpus;
static int __bman_portals_probed;
/* protect bman global registers and global data shared among portals */ /* protect bman global registers and global data shared among portals */
static DEFINE_SPINLOCK(bman_lock); static DEFINE_SPINLOCK(bman_lock);
...@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu) ...@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu)
return 0; return 0;
} }
int bman_portals_probed(void)
{
return __bman_portals_probed;
}
EXPORT_SYMBOL_GPL(bman_portals_probed);
static int bman_portal_probe(struct platform_device *pdev) static int bman_portal_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
...@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev)
} }
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) if (!pcfg) {
__bman_portals_probed = -1;
return -ENOMEM; return -ENOMEM;
}
pcfg->dev = dev; pcfg->dev = dev;
...@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE); DPAA_PORTAL_CE);
if (!addr_phys[0]) { if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
return -ENXIO; goto err_ioremap1;
} }
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI); DPAA_PORTAL_CI);
if (!addr_phys[1]) { if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
return -ENXIO; goto err_ioremap1;
} }
pcfg->cpu = -1; pcfg->cpu = -1;
...@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq <= 0) { if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ'\n", node); dev_err(dev, "Can't get %pOF IRQ'\n", node);
return -ENXIO; goto err_ioremap1;
} }
pcfg->irq = irq; pcfg->irq = irq;
...@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_lock(&bman_lock); spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus); cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) { if (cpu >= nr_cpu_ids) {
__bman_portals_probed = 1;
/* unassigned portal, skip init */ /* unassigned portal, skip init */
spin_unlock(&bman_lock); spin_unlock(&bman_lock);
return 0; return 0;
...@@ -175,6 +185,8 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -175,6 +185,8 @@ static int bman_portal_probe(struct platform_device *pdev)
err_ioremap2: err_ioremap2:
memunmap(pcfg->addr_virt_ce); memunmap(pcfg->addr_virt_ce);
err_ioremap1: err_ioremap1:
__bman_portals_probed = -1;
return -ENXIO; return -ENXIO;
} }
......
...@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev) ...@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev)
} }
#define LIO_CFG_LIODN_MASK 0x0fff0000 #define LIO_CFG_LIODN_MASK 0x0fff0000
void qman_liodn_fixup(u16 channel) void __qman_liodn_fixup(u16 channel)
{ {
static int done; static int done;
static u32 liodn_offset; static u32 liodn_offset;
......
...@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal); ...@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal);
#define CONFIG_FSL_DPA_PIRQ_FAST 1 #define CONFIG_FSL_DPA_PIRQ_FAST 1
static struct cpumask portal_cpus; static struct cpumask portal_cpus;
static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */ /* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock); static DEFINE_SPINLOCK(qman_lock);
...@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu) ...@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu)
return 0; return 0;
} }
int qman_portals_probed(void)
{
return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);
static int qman_portal_probe(struct platform_device *pdev) static int qman_portal_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
...@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev)
} }
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) if (!pcfg) {
__qman_portals_probed = -1;
return -ENOMEM; return -ENOMEM;
}
pcfg->dev = dev; pcfg->dev = dev;
...@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE); DPAA_PORTAL_CE);
if (!addr_phys[0]) { if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
return -ENXIO; goto err_ioremap1;
} }
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI); DPAA_PORTAL_CI);
if (!addr_phys[1]) { if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
return -ENXIO; goto err_ioremap1;
} }
err = of_property_read_u32(node, "cell-index", &val); err = of_property_read_u32(node, "cell-index", &val);
if (err) { if (err) {
dev_err(dev, "Can't get %pOF property 'cell-index'\n", node); dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
__qman_portals_probed = -1;
return err; return err;
} }
pcfg->channel = val; pcfg->channel = val;
...@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq <= 0) { if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ\n", node); dev_err(dev, "Can't get %pOF IRQ\n", node);
return -ENXIO; goto err_ioremap1;
} }
pcfg->irq = irq; pcfg->irq = irq;
...@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_lock(&qman_lock); spin_lock(&qman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus); cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) { if (cpu >= nr_cpu_ids) {
__qman_portals_probed = 1;
/* unassigned portal, skip init */ /* unassigned portal, skip init */
spin_unlock(&qman_lock); spin_unlock(&qman_lock);
return 0; return 0;
...@@ -321,6 +332,8 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -321,6 +332,8 @@ static int qman_portal_probe(struct platform_device *pdev)
err_ioremap2: err_ioremap2:
memunmap(pcfg->addr_virt_ce); memunmap(pcfg->addr_virt_ce);
err_ioremap1: err_ioremap1:
__qman_portals_probed = -1;
return -ENXIO; return -ENXIO;
} }
......
...@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */ ...@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void); u32 qm_get_pools_sdqcr(void);
int qman_wq_alloc(void); int qman_wq_alloc(void);
void qman_liodn_fixup(u16 channel); #ifdef CONFIG_FSL_PAMU
#define qman_liodn_fixup __qman_liodn_fixup
#else
static inline void qman_liodn_fixup(u16 channel)
{
}
#endif
void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx); void qman_set_sdest(u16 channel, unsigned int cpu_idx);
struct qman_portal *qman_create_affine_portal( struct qman_portal *qman_create_affine_portal(
......
...@@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); ...@@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
* failed to probe or 0 if the bman driver did not probed yet. * failed to probe or 0 if the bman driver did not probed yet.
*/ */
int bman_is_probed(void); int bman_is_probed(void);
/**
* bman_portals_probed - Check if all cpu bound bman portals are probed
*
* Returns 1 if all the required cpu bound bman portals successfully probed,
* -1 if probe errors appeared or 0 if the bman portals did not yet finished
* probing.
*/
int bman_portals_probed(void);
#endif /* __FSL_BMAN_H */ #endif /* __FSL_BMAN_H */
...@@ -1194,6 +1194,15 @@ int qman_release_cgrid(u32 id); ...@@ -1194,6 +1194,15 @@ int qman_release_cgrid(u32 id);
*/ */
int qman_is_probed(void); int qman_is_probed(void);
/**
* qman_portals_probed - Check if all cpu bound qman portals are probed
*
* Returns 1 if all the required cpu bound qman portals successfully probed,
* -1 if probe errors appeared or 0 if the qman portals did not yet finished
* probing.
*/
int qman_portals_probed(void);
/** /**
* qman_dqrr_get_ithresh - Get coalesce interrupt threshold * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
* @portal: portal to get the value for * @portal: portal to get the value for
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment