Commit a45ad71e authored by Linus Torvalds

Merge tag 'rproc-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc

Pull remoteproc updates from Bjorn Andersson:
 "This adds support for the Mediatek MT8183 SCP, modem remoteproc on
  Qualcomm SC7180 platform, audio and sensor remoteprocs on Qualcomm
  MSM8998 and audio, compute, modem and sensor remoteprocs on Qualcomm
  SM8150.

  It ensures that votes for the necessary power-domains are held for all
  Qualcomm TrustZone based remoteproc instances, fixes a bug related to
  remoteproc drivers registering before the core has been initialized, and
  cleans up the Qualcomm modem remoteproc driver"

* tag 'rproc-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc: (21 commits)
  remoteproc: qcom: q6v5-mss: Improve readability of reset_assert
  remoteproc: qcom: q6v5-mss: Use regmap_read_poll_timeout
  remoteproc: qcom: q6v5-mss: Rename boot status timeout
  remoteproc: qcom: q6v5-mss: Improve readability across clk handling
  remoteproc: use struct_size() helper
  remoteproc: Initialize rproc_class before use
  rpmsg: add rpmsg support for mt8183 SCP.
  remoteproc/mediatek: add SCP support for mt8183
  dt-bindings: Add a binding for Mediatek SCP
  remoteproc: mss: q6v5-mss: Add modem support on SC7180
  dt-bindings: remoteproc: qcom: Add Q6V5 Modem PIL binding for SC7180
  remoteproc: qcom: pas: Add MSM8998 ADSP and SLPI support
  dt-bindings: remoteproc: qcom: Add ADSP and SLPI support for MSM8998 SoC
  remoteproc: q6v5-mss: Remove mem clk from the active pool
  remoteproc: qcom: Remove unneeded semicolon
  remoteproc: qcom: pas: Add auto_boot flag
  remoteproc: qcom: pas: Add SM8150 ADSP, CDSP, Modem and SLPI support
  dt-bindings: remoteproc: qcom: SM8150 Add ADSP, CDSP, MPSS and SLPI support
  remoteproc: qcom: pas: Vote for active/proxy power domains
  dt-bindings: remoteproc: qcom: Add power-domain bindings for Q6V5 PAS
  ...
parents 68509798 600c39b3
Mediatek SCP Bindings
----------------------------------------

This binding provides support for the ARM Cortex-M4 co-processor found on some
Mediatek SoCs.

Required properties:
- compatible	Should be "mediatek,mt8183-scp"
- reg		Should contain the address ranges for the two memory
		regions, SRAM and CFG.
- reg-names	Contains the corresponding names for the two memory
		regions. These should be named "sram" & "cfg".
- clocks	Clock for the co-processor (See: ../clock/clock-bindings.txt)
- clock-names	Contains the corresponding name for the clock. This
		should be named "main".

Subnodes
--------

Subnodes of the SCP represent rpmsg devices. The names of the devices are not
important. The properties of these nodes are defined by the individual bindings
for the rpmsg devices - but must contain the following property:

- mtk,rpmsg-name	Contains the name for the rpmsg device. Used to match
			the subnode to the rpmsg device announced by the SCP.

Example:

	scp: scp@10500000 {
		compatible = "mediatek,mt8183-scp";
		reg = <0 0x10500000 0 0x80000>,
		      <0 0x105c0000 0 0x5000>;
		reg-names = "sram", "cfg";
		clocks = <&infracfg CLK_INFRA_SCPSYS>;
		clock-names = "main";
	};
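As a further illustration (not taken from the binding itself), an SCP node that
announces one rpmsg device would carry a subnode with the mtk,rpmsg-name
property described above; the subnode and device names here are placeholders:

	scp: scp@10500000 {
		compatible = "mediatek,mt8183-scp";
		/* reg, reg-names, clocks and clock-names as in the example above */

		rpmsg-subdev {
			mtk,rpmsg-name = "example-rpmsg-device";
		};
	};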
@@ -10,11 +10,17 @@ on the Qualcomm ADSP Hexagon core.
		    "qcom,msm8974-adsp-pil"
		    "qcom,msm8996-adsp-pil"
		    "qcom,msm8996-slpi-pil"
		    "qcom,msm8998-adsp-pas"
		    "qcom,msm8998-slpi-pas"
		    "qcom,qcs404-adsp-pas"
		    "qcom,qcs404-cdsp-pas"
		    "qcom,qcs404-wcss-pas"
		    "qcom,sdm845-adsp-pas"
		    "qcom,sdm845-cdsp-pas"
		    "qcom,sm8150-adsp-pas"
		    "qcom,sm8150-cdsp-pas"
		    "qcom,sm8150-mpss-pas"
		    "qcom,sm8150-slpi-pas"

- interrupts-extended:
	Usage: required
@@ -29,12 +35,18 @@ on the Qualcomm ADSP Hexagon core.
		    qcom,msm8974-adsp-pil:
		    qcom,msm8996-adsp-pil:
		    qcom,msm8996-slpi-pil:
		    qcom,msm8998-adsp-pas:
		    qcom,msm8998-slpi-pas:
		    qcom,qcs404-adsp-pas:
		    qcom,qcs404-cdsp-pas:
		    qcom,sdm845-adsp-pas:
		    qcom,sdm845-cdsp-pas:
		    qcom,sm8150-adsp-pas:
		    qcom,sm8150-cdsp-pas:
		    qcom,sm8150-slpi-pas:
			must be "wdog", "fatal", "ready", "handover", "stop-ack"
		    qcom,qcs404-wcss-pas:
		    qcom,sm8150-mpss-pas:
			must be "wdog", "fatal", "ready", "handover", "stop-ack",
			"shutdown-ack"
@@ -67,6 +79,38 @@ on the Qualcomm ADSP Hexagon core.
	Definition: reference to the px regulator to be held on behalf of the
		    booting Hexagon core

- power-domains:
	Usage: required
	Value type: <phandle>
	Definition: reference to power-domains that match the power-domain-names

- power-domain-names:
	Usage: required
	Value type: <stringlist>
	Definition: The power-domains needed depend on the compatible string:
		    qcom,msm8974-adsp-pil:
		    qcom,msm8996-adsp-pil:
		    qcom,msm8998-adsp-pas:
			must be "cx"
		    qcom,msm8996-slpi-pil:
			must be "ss_cx"
		    qcom,msm8998-slpi-pas:
			must be "ssc_cx"
		    qcom,qcs404-adsp-pas:
			must be "lpi_cx"
		    qcom,qcs404-cdsp-pas:
		    qcom,qcs404-wcss-pas:
			must be "mx"
		    qcom,sdm845-adsp-pas:
		    qcom,sdm845-cdsp-pas:
		    qcom,sm8150-adsp-pas:
		    qcom,sm8150-cdsp-pas:
			must be "cx", "load_state"
		    qcom,sm8150-mpss-pas:
			must be "cx", "load_state", "mss"
		    qcom,sm8150-slpi-pas:
			must be "lcx", "lmx", "load_state"
- memory-region:
	Usage: required
	Value type: <phandle>
......
@@ -13,6 +13,7 @@ on the Qualcomm Hexagon core.
		    "qcom,msm8974-mss-pil"
		    "qcom,msm8996-mss-pil"
		    "qcom,msm8998-mss-pil"
		    "qcom,sc7180-mss-pil"
		    "qcom,sdm845-mss-pil"

- reg:
@@ -43,6 +44,7 @@ on the Qualcomm Hexagon core.
			must be "wdog", "fatal", "ready", "handover", "stop-ack"
		    qcom,msm8996-mss-pil:
		    qcom,msm8998-mss-pil:
		    qcom,sc7180-mss-pil:
		    qcom,sdm845-mss-pil:
			must be "wdog", "fatal", "ready", "handover", "stop-ack",
			"shutdown-ack"
@@ -75,6 +77,9 @@ on the Qualcomm Hexagon core.
		    qcom,msm8998-mss-pil:
			must be "iface", "bus", "mem", "xo", "gpll0_mss",
			"snoc_axi", "mnoc_axi", "qdss"
		    qcom,sc7180-mss-pil:
			must be "iface", "bus", "xo", "snoc_axi", "mnoc_axi",
			"mss_crypto", "mss_nav", "nav"
		    qcom,sdm845-mss-pil:
			must be "iface", "bus", "mem", "xo", "gpll0_mss",
			"snoc_axi", "mnoc_axi", "prng"
@@ -86,7 +91,7 @@ on the Qualcomm Hexagon core.
		    reference to the list of 3 reset-controllers for the
		    wcss sub-system
		    reference to the list of 2 reset-controllers for the modem
		    sub-system on SC7180, SDM845 SoCs

- reset-names:
	Usage: required
@@ -95,7 +100,7 @@ on the Qualcomm Hexagon core.
		    must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
		    for the wcss sub-system
		    must be "mss_restart", "pdc_reset" for the modem
		    sub-system on SC7180, SDM845 SoCs

For the compatible strings below the following supplies are required:
  "qcom,q6v5-pil"
@@ -144,6 +149,7 @@ For the compatible string below the following supplies are required:
		    qcom,msm8996-mss-pil:
		    qcom,msm8998-mss-pil:
			must be "cx", "mx"
		    qcom,sc7180-mss-pil:
		    qcom,sdm845-mss-pil:
			must be "cx", "mx", "mss", "load_state"
@@ -165,6 +171,19 @@ For the compatible string below the following supplies are required:
		    by the three offsets within syscon for q6, modem and nc
		    halt registers.

For the compatible strings below the following phandle references are required:
  "qcom,sc7180-mss-pil"

- qcom,halt-nav-regs:
	Usage: required
	Value type: <prop-encoded-array>
	Definition: reference to a list of 2 phandles with one offset each for
		    the modem sub-system running on SC7180 SoC. The first
		    phandle reference is to the mss clock node followed by the
		    offset within register space for nav halt register. The
		    second phandle reference is to a syscon representing TCSR
		    followed by the offset within syscon for conn_box_spare0
		    register.
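For illustration only (not part of the binding text), the property could be
laid out as follows; the labels and offsets below are placeholders:

	qcom,halt-nav-regs = <&mss_clk 0x1000>, <&tcsr 0x2000>;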
= SUBNODES:
The Hexagon node must contain two subnodes, named "mba" and "mpss" representing
the memory regions used by the Hexagon firmware. Each sub-node must contain:
......
@@ -23,6 +23,16 @@ config IMX_REMOTEPROC
	  It's safe to say N here.

config MTK_SCP
	tristate "Mediatek SCP support"
	depends on ARCH_MEDIATEK
	select RPMSG_MTK_SCP
	help
	  Say y here to support Mediatek's System Companion Processor (SCP) via
	  the remote processor framework.

	  It's safe to say N here.

config OMAP_REMOTEPROC
	tristate "OMAP remoteproc support"
	depends on ARCH_OMAP4 || SOC_OMAP5
......
@@ -10,6 +10,7 @@ remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 MediaTek Inc.
*/
#ifndef __RPROC_MTK_COMMON_H
#define __RPROC_MTK_COMMON_H
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#define MT8183_SW_RSTN 0x0
#define MT8183_SW_RSTN_BIT BIT(0)
#define MT8183_SCP_TO_HOST 0x1C
#define MT8183_SCP_IPC_INT_BIT BIT(0)
#define MT8183_SCP_WDT_INT_BIT BIT(8)
#define MT8183_HOST_TO_SCP 0x28
#define MT8183_HOST_IPC_INT_BIT BIT(0)
#define MT8183_WDT_CFG 0x84
#define MT8183_SCP_CLK_SW_SEL 0x4000
#define MT8183_SCP_CLK_DIV_SEL 0x4024
#define MT8183_SCP_SRAM_PDN 0x402C
#define MT8183_SCP_L1_SRAM_PD 0x4080
#define MT8183_SCP_TCM_TAIL_SRAM_PD 0x4094
#define MT8183_SCP_CACHE_SEL(x) (0x14000 + (x) * 0x3000)
#define MT8183_SCP_CACHE_CON MT8183_SCP_CACHE_SEL(0)
#define MT8183_SCP_DCACHE_CON MT8183_SCP_CACHE_SEL(1)
#define MT8183_SCP_CACHESIZE_8KB BIT(8)
#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
#define SCP_FW_VER_LEN 32
#define SCP_SHARE_BUFFER_SIZE 288
struct scp_run {
u32 signaled;
s8 fw_ver[SCP_FW_VER_LEN];
u32 dec_capability;
u32 enc_capability;
wait_queue_head_t wq;
};
struct scp_ipi_desc {
/* For protecting handler. */
struct mutex lock;
scp_ipi_handler_t handler;
void *priv;
};
struct mtk_scp {
struct device *dev;
struct rproc *rproc;
struct clk *clk;
void __iomem *reg_base;
void __iomem *sram_base;
size_t sram_size;
struct mtk_share_obj __iomem *recv_buf;
struct mtk_share_obj __iomem *send_buf;
struct scp_run run;
/* To prevent multiple ipi_send run concurrently. */
struct mutex send_lock;
struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
bool ipi_id_ack[SCP_IPI_MAX];
wait_queue_head_t ack_wq;
void __iomem *cpu_addr;
phys_addr_t phys_addr;
size_t dram_size;
struct rproc_subdev *rpmsg_subdev;
};
/**
* struct mtk_share_obj - SRAM buffer shared with AP and SCP
*
* @id: IPI id
* @len: share buffer length
* @share_buf: share buffer data
*/
struct mtk_share_obj {
u32 id;
u32 len;
u8 share_buf[SCP_SHARE_BUFFER_SIZE];
};
void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
void scp_ipi_lock(struct mtk_scp *scp, u32 id);
void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
#endif
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.
#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>
#include "mtk_common.h"
#include "remoteproc_internal.h"
#define MAX_CODE_SIZE 0x500000
#define SCP_FW_END 0x7C000
/**
* scp_get() - get a reference to SCP.
*
* @pdev: the platform device of the module requesting SCP platform
* device for using SCP API.
*
* Return: NULL if it fails, otherwise a reference to the SCP.
**/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *scp_node;
struct platform_device *scp_pdev;
scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
if (!scp_node) {
dev_err(dev, "can't get SCP node\n");
return NULL;
}
scp_pdev = of_find_device_by_node(scp_node);
of_node_put(scp_node);
if (WARN_ON(!scp_pdev)) {
dev_err(dev, "SCP pdev failed\n");
return NULL;
}
return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);
/**
* scp_put() - "free" the SCP
*
* @scp: mtk_scp structure from scp_get().
**/
void scp_put(struct mtk_scp *scp)
{
put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
}
static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
struct mtk_scp *scp = (struct mtk_scp *)priv;
struct scp_run *run = (struct scp_run *)data;
scp->run.signaled = run->signaled;
strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
scp->run.dec_capability = run->dec_capability;
scp->run.enc_capability = run->enc_capability;
wake_up_interruptible(&scp->run.wq);
}
static void scp_ipi_handler(struct mtk_scp *scp)
{
struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
scp_ipi_handler_t handler;
u32 id = readl(&rcv_obj->id);
u32 len = readl(&rcv_obj->len);
if (len > SCP_SHARE_BUFFER_SIZE) {
dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
SCP_SHARE_BUFFER_SIZE);
return;
}
if (id >= SCP_IPI_MAX) {
dev_err(scp->dev, "No such ipi id = %d\n", id);
return;
}
scp_ipi_lock(scp, id);
handler = ipi_desc[id].handler;
if (!handler) {
dev_err(scp->dev, "No such ipi id = %d\n", id);
scp_ipi_unlock(scp, id);
return;
}
memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
handler(tmp_data, len, ipi_desc[id].priv);
scp_ipi_unlock(scp, id);
scp->ipi_id_ack[id] = true;
wake_up(&scp->ack_wq);
}
static int scp_ipi_init(struct mtk_scp *scp)
{
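/*
 * The AP<->SCP shared buffers are carved out back to back at the top of
 * the SCP firmware region in SRAM (just below SCP_FW_END).
 */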
size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
/* Disable SCP to host interrupt */
writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
/* shared buffer initialization */
scp->recv_buf =
(struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
scp->send_buf =
(struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
return 0;
}
static void scp_reset_assert(const struct mtk_scp *scp)
{
u32 val;
val = readl(scp->reg_base + MT8183_SW_RSTN);
val &= ~MT8183_SW_RSTN_BIT;
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
static void scp_reset_deassert(const struct mtk_scp *scp)
{
u32 val;
val = readl(scp->reg_base + MT8183_SW_RSTN);
val |= MT8183_SW_RSTN_BIT;
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
static irqreturn_t scp_irq_handler(int irq, void *priv)
{
struct mtk_scp *scp = priv;
u32 scp_to_host;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clocks\n");
return IRQ_NONE;
}
scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
scp_ipi_handler(scp);
else
scp_wdt_handler(scp, scp_to_host);
/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
scp->reg_base + MT8183_SCP_TO_HOST);
clk_disable_unprepare(scp->clk);
return IRQ_HANDLED;
}
static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
struct elf32_hdr *ehdr;
struct elf32_phdr *phdr;
int i, ret = 0;
const u8 *elf_data = fw->data;
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
/* go through the available ELF segments */
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
u32 da = phdr->p_paddr;
u32 memsz = phdr->p_memsz;
u32 filesz = phdr->p_filesz;
u32 offset = phdr->p_offset;
void __iomem *ptr;
if (phdr->p_type != PT_LOAD)
continue;
dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
phdr->p_type, da, memsz, filesz);
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
/* grab the kernel address for this device address */
ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
if (phdr->p_filesz)
scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
filesz);
}
return ret;
}
static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
const struct mtk_scp *scp = rproc->priv;
struct device *dev = scp->dev;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
return ret;
}
/* Hold SCP in reset while loading FW. */
scp_reset_assert(scp);
/* Reset clocks before loading FW */
writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
/* Initialize TCM before loading FW. */
writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
/* Turn on the power of SCP's SRAM before using it. */
writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
/*
* Set I-cache and D-cache size before loading SCP FW.
* SCP SRAM logical address may change when cache size setting differs.
*/
writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
scp->reg_base + MT8183_SCP_CACHE_CON);
writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
ret = scp_elf_load_segments(rproc, fw);
clk_disable_unprepare(scp->clk);
return ret;
}
static int scp_start(struct rproc *rproc)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
struct device *dev = scp->dev;
struct scp_run *run = &scp->run;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
return ret;
}
run->signaled = false;
scp_reset_deassert(scp);
ret = wait_event_interruptible_timeout(
run->wq,
run->signaled,
msecs_to_jiffies(2000));
if (ret == 0) {
dev_err(dev, "wait SCP initialization timeout!\n");
ret = -ETIME;
goto stop;
}
if (ret == -ERESTARTSYS) {
dev_err(dev, "wait SCP interrupted by a signal!\n");
goto stop;
}
clk_disable_unprepare(scp->clk);
dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
return 0;
stop:
scp_reset_assert(scp);
clk_disable_unprepare(scp->clk);
return ret;
}
static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
int offset;
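/*
 * Device addresses below the SRAM size are offsets into SRAM; anything
 * else is expected to fall inside the reserved DRAM region allocated in
 * scp_map_memory_region().
 */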
if (da < scp->sram_size) {
offset = da;
if (offset >= 0 && (offset + len) < scp->sram_size)
return (void __force *)scp->sram_base + offset;
} else {
offset = da - scp->phys_addr;
if (offset >= 0 && (offset + len) < scp->dram_size)
return (void __force *)scp->cpu_addr + offset;
}
return NULL;
}
static int scp_stop(struct rproc *rproc)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
int ret;
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clocks\n");
return ret;
}
scp_reset_assert(scp);
/* Disable SCP watchdog */
writel(0, scp->reg_base + MT8183_WDT_CFG);
clk_disable_unprepare(scp->clk);
return 0;
}
static const struct rproc_ops scp_ops = {
.start = scp_start,
.stop = scp_stop,
.load = scp_load,
.da_to_va = scp_da_to_va,
};
/**
* scp_get_device() - get device struct of SCP
*
* @scp: mtk_scp structure
**/
struct device *scp_get_device(struct mtk_scp *scp)
{
return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);
/**
* scp_get_rproc() - get rproc struct of SCP
*
* @scp: mtk_scp structure
**/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);
/**
* scp_get_vdec_hw_capa() - get video decoder hardware capability
*
* @scp: mtk_scp structure
*
* Return: video decoder hardware capability
**/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
/**
* scp_get_venc_hw_capa() - get video encoder hardware capability
*
* @scp: mtk_scp structure
*
* Return: video encoder hardware capability
**/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
/**
* scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
*
* @scp: mtk_scp structure
* @mem_addr: SCP views memory address
*
* Mapping the SCP's SRAM address /
* DMEM (Data Extended Memory) memory address /
* Working buffer memory address to
* kernel virtual address.
*
* Return: Return ERR_PTR(-EINVAL) if mapping failed,
* otherwise the mapped kernel virtual address
**/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
void *ptr;
ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
if (!ptr)
return ERR_PTR(-EINVAL);
return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
static int scp_map_memory_region(struct mtk_scp *scp)
{
int ret;
ret = of_reserved_mem_device_init(scp->dev);
if (ret) {
dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
return -ENOMEM;
}
/* Reserved SCP code size */
scp->dram_size = MAX_CODE_SIZE;
scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
&scp->phys_addr, GFP_KERNEL);
if (!scp->cpu_addr)
return -ENOMEM;
return 0;
}
static void scp_unmap_memory_region(struct mtk_scp *scp)
{
dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
scp->phys_addr);
of_reserved_mem_device_release(scp->dev);
}
static int scp_register_ipi(struct platform_device *pdev, u32 id,
ipi_handler_t handler, void *priv)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
return scp_ipi_register(scp, id, handler, priv);
}
static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
scp_ipi_unregister(scp, id);
}
static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
unsigned int len, unsigned int wait)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
return scp_ipi_send(scp, id, buf, len, wait);
}
static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
.send_ipi = scp_send_ipi,
.register_ipi = scp_register_ipi,
.unregister_ipi = scp_unregister_ipi,
.ns_ipi_id = SCP_IPI_NS_SERVICE,
};
static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
scp->rpmsg_subdev =
mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
&mtk_scp_rpmsg_info);
if (scp->rpmsg_subdev)
rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}
static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
if (scp->rpmsg_subdev) {
rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
scp->rpmsg_subdev = NULL;
}
}
static int scp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_scp *scp;
struct rproc *rproc;
struct resource *res;
char *fw_name = "scp.img";
int ret, i;
rproc = rproc_alloc(dev,
np->name,
&scp_ops,
fw_name,
sizeof(*scp));
if (!rproc) {
dev_err(dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
scp = (struct mtk_scp *)rproc->priv;
scp->rproc = rproc;
scp->dev = dev;
platform_set_drvdata(pdev, scp);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
scp->sram_base = devm_ioremap_resource(dev, res);
if (IS_ERR((__force void *)scp->sram_base)) {
dev_err(dev, "Failed to parse and map sram memory\n");
ret = PTR_ERR((__force void *)scp->sram_base);
goto free_rproc;
}
scp->sram_size = resource_size(res);
mutex_init(&scp->send_lock);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_init(&scp->ipi_desc[i].lock);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
scp->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR((__force void *)scp->reg_base)) {
dev_err(dev, "Failed to parse and map cfg memory\n");
ret = PTR_ERR((__force void *)scp->reg_base);
goto destroy_mutex;
}
ret = scp_map_memory_region(scp);
if (ret)
goto destroy_mutex;
scp->clk = devm_clk_get(dev, "main");
if (IS_ERR(scp->clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(scp->clk);
goto release_dev_mem;
}
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(dev, "failed to enable clocks\n");
goto release_dev_mem;
}
ret = scp_ipi_init(scp);
clk_disable_unprepare(scp->clk);
if (ret) {
dev_err(dev, "Failed to init ipi\n");
goto release_dev_mem;
}
/* register SCP initialization IPI */
ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
if (ret) {
dev_err(dev, "Failed to register IPI_SCP_INIT\n");
goto release_dev_mem;
}
init_waitqueue_head(&scp->run.wq);
init_waitqueue_head(&scp->ack_wq);
scp_add_rpmsg_subdev(scp);
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
scp_irq_handler, IRQF_ONESHOT,
pdev->name, scp);
if (ret) {
dev_err(dev, "failed to request irq\n");
goto remove_subdev;
}
ret = rproc_add(rproc);
if (ret)
goto remove_subdev;
return 0;
remove_subdev:
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
scp_unmap_memory_region(scp);
destroy_mutex:
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
free_rproc:
rproc_free(rproc);
return ret;
}
static int scp_remove(struct platform_device *pdev)
{
struct mtk_scp *scp = platform_get_drvdata(pdev);
int i;
rproc_del(scp->rproc);
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
scp_unmap_memory_region(scp);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
rproc_free(scp->rproc);
return 0;
}
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp"},
{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
static struct platform_driver mtk_scp_driver = {
.probe = scp_probe,
.remove = scp_remove,
.driver = {
.name = "mtk-scp",
.of_match_table = of_match_ptr(mtk_scp_of_match),
},
};
module_platform_driver(mtk_scp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.
#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/mtk_scp.h>
#include "mtk_common.h"
/**
* scp_ipi_register() - register an ipi function
*
* @scp: mtk_scp structure
* @id: IPI ID
* @handler: IPI handler
* @priv: private data for IPI handler
*
* Register an ipi function to receive ipi interrupt from SCP.
*
* Returns 0 if ipi registers successfully, -error on error.
*/
int scp_ipi_register(struct mtk_scp *scp,
u32 id,
scp_ipi_handler_t handler,
void *priv)
{
if (!scp)
	return -EPROBE_DEFER;
if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
return -EINVAL;
scp_ipi_lock(scp, id);
scp->ipi_desc[id].handler = handler;
scp->ipi_desc[id].priv = priv;
scp_ipi_unlock(scp, id);
return 0;
}
EXPORT_SYMBOL_GPL(scp_ipi_register);
/**
* scp_ipi_unregister() - unregister an ipi function
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Unregister an ipi function to receive ipi interrupt from SCP.
*/
void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
{
if (!scp)
return;
if (WARN_ON(id >= SCP_IPI_MAX))
return;
scp_ipi_lock(scp, id);
scp->ipi_desc[id].handler = NULL;
scp->ipi_desc[id].priv = NULL;
scp_ipi_unlock(scp, id);
}
EXPORT_SYMBOL_GPL(scp_ipi_unregister);
/*
* scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
*
* @dst: Pointer to the destination buffer, should be in SCP SRAM region.
* @src: Pointer to the source buffer.
* @len: Length of the source buffer to be copied.
*
* Since AP access to SCP SRAM doesn't support byte writes, this always writes
* a full word at a time, and may cause some extra bytes to be written at the
* beginning & end of dst.
*/
void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
{
void __iomem *ptr;
u32 val;
unsigned int i = 0, remain;
if (!IS_ALIGNED((unsigned long)dst, 4)) {
ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
i = 4 - (dst - ptr);
val = readl_relaxed(ptr);
memcpy((u8 *)&val + (4 - i), src, i);
writel_relaxed(val, ptr);
}
__iowrite32_copy(dst + i, src + i, (len - i) / 4);
remain = (len - i) % 4;
if (remain > 0) {
val = readl_relaxed(dst + len - remain);
memcpy(&val, src + len - remain, remain);
writel_relaxed(val, dst + len - remain);
}
}
EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
/**
* scp_ipi_lock() - Lock before operations of an IPI ID
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Note: This should not be used by drivers other than mtk_scp.
*/
void scp_ipi_lock(struct mtk_scp *scp, u32 id)
{
if (WARN_ON(id >= SCP_IPI_MAX))
return;
mutex_lock(&scp->ipi_desc[id].lock);
}
EXPORT_SYMBOL_GPL(scp_ipi_lock);
/**
* scp_ipi_unlock() - Unlock after operations of an IPI ID
*
* @scp: mtk_scp structure
* @id: IPI ID
*
* Note: This should not be used by drivers other than mtk_scp.
*/
void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
{
if (WARN_ON(id >= SCP_IPI_MAX))
return;
mutex_unlock(&scp->ipi_desc[id].lock);
}
EXPORT_SYMBOL_GPL(scp_ipi_unlock);
/**
* scp_ipi_send() - send data from AP to scp.
*
* @scp: mtk_scp structure
* @id: IPI ID
* @buf: the data buffer
* @len: the data buffer length
* @wait: number of msecs to wait for ack. 0 to skip waiting.
*
* This function is thread-safe. When this function returns,
* SCP has received the data and starts the processing.
* When the processing completes, IPI handler registered
* by scp_ipi_register will be called in interrupt context.
*
* Returns 0 if sending data successfully, -error on error.
**/
int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
unsigned int wait)
{
struct mtk_share_obj __iomem *send_obj = scp->send_buf;
unsigned long timeout;
int ret;
if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
WARN_ON(id == SCP_IPI_NS_SERVICE) ||
WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
return -EINVAL;
mutex_lock(&scp->send_lock);
ret = clk_prepare_enable(scp->clk);
if (ret) {
dev_err(scp->dev, "failed to enable clock\n");
goto unlock_mutex;
}
/* Wait until SCP receives the last command */
timeout = jiffies + msecs_to_jiffies(2000);
do {
if (time_after(jiffies, timeout)) {
dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
ret = -ETIMEDOUT;
goto clock_disable;
}
} while (readl(scp->reg_base + MT8183_HOST_TO_SCP));
scp_memcpy_aligned(send_obj->share_buf, buf, len);
writel(len, &send_obj->len);
writel(id, &send_obj->id);
scp->ipi_id_ack[id] = false;
/* send the command to SCP */
writel(MT8183_HOST_IPC_INT_BIT, scp->reg_base + MT8183_HOST_TO_SCP);
if (wait) {
/* wait for SCP's ACK */
timeout = msecs_to_jiffies(wait);
ret = wait_event_timeout(scp->ack_wq,
scp->ipi_id_ack[id],
timeout);
scp->ipi_id_ack[id] = false;
if (WARN(!ret, "scp ipi %d ack time out !", id))
ret = -EIO;
else
ret = 0;
}
clock_disable:
clk_disable_unprepare(scp->clk);
unlock_mutex:
mutex_unlock(&scp->send_lock);
return ret;
}
EXPORT_SYMBOL_GPL(scp_ipi_send);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek scp IPI interface");
@@ -68,14 +68,24 @@
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
#define NAV_AXI_HALTREQ_BIT BIT(0)
#define NAV_AXI_HALTACK_BIT BIT(1)
#define NAV_AXI_IDLE_BIT BIT(2)
#define AXI_GATING_VALID_OVERRIDE BIT(0)
#define HALT_ACK_TIMEOUT_MS 100 #define HALT_ACK_TIMEOUT_US 100000
#define NAV_HALT_ACK_TIMEOUT_US 200
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN BIT(0)
#define Q6SS_CBCR_CLKOFF BIT(31)
#define Q6SS_CBCR_TIMEOUT_US 200
/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
@@ -96,15 +106,16 @@
#define QDSP6v56_BHS_ON BIT(24)
#define QDSP6v56_CLAMP_WL BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
#define HALT_CHECK_MAX_LOOPS 200
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
#define SLEEP_CHECK_MAX_LOOPS 200 #define QDSP6SS_BOOT_STATUS 0x408
#define BOOT_STATUS_TIMEOUT_US 200
#define BOOT_FSM_TIMEOUT 10000
struct reg_info {
@@ -131,6 +142,7 @@ struct rproc_hexagon_res {
int version;
bool need_mem_protection;
bool has_alt_reset;
bool has_halt_nav;
};
struct q6v5 {
@@ -141,9 +153,14 @@ struct q6v5 {
void __iomem *rmb_base;
struct regmap *halt_map;
struct regmap *halt_nav_map;
struct regmap *conn_map;
u32 halt_q6;
u32 halt_modem;
u32 halt_nc;
u32 halt_nav;
u32 conn_box;
struct reset_control *mss_restart;
struct reset_control *pdc_reset;
@@ -187,6 +204,7 @@ struct q6v5 {
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
bool has_halt_nav;
int mpss_perm;
int mba_perm;
const char *hexagon_mdt_image;
@@ -198,6 +216,7 @@ enum {
MSS_MSM8974,
MSS_MSM8996,
MSS_MSM8998,
MSS_SC7180,
MSS_SDM845,
};
@@ -396,6 +415,26 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
} else if (qproc->has_halt_nav) {
/*
* When the AXI pipeline is being reset with the Q6 modem partly
* operational there is a possibility of the AXI valid signal glitching,
* leading to spurious transactions and Q6 hangs. A workaround is
* employed by asserting the AXI_GATING_VALID_OVERRIDE
* BIT before triggering Q6 MSS reset. Both the HALTREQ and
* AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
* followed by a MSS deassert, while holding the PDC reset.
*/
reset_control_assert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 1);
regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
NAV_AXI_HALTREQ_BIT, 0);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 0);
ret = reset_control_deassert(qproc->mss_restart);
} else {
ret = reset_control_assert(qproc->mss_restart);
}
@@ -413,6 +452,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
} else if (qproc->has_halt_nav) {
ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
@@ -474,12 +515,12 @@ static int q6v5proc_reset(struct q6v5 *qproc)
if (qproc->version == MSS_SDM845) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
val |= 0x1; val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
val, !(val & BIT(31)), 1, val, !(val & Q6SS_CBCR_CLKOFF), 1,
SLEEP_CHECK_MAX_LOOPS); Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
@@ -499,6 +540,54 @@ static int q6v5proc_reset(struct q6v5 *qproc)
return ret;
}
goto pbl_wait;
} else if (qproc->version == MSS_SC7180) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
}
/* Turn on the XO clock needed for PLL setup */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & Q6SS_CBCR_CLKOFF), 1,
Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
return -ETIMEDOUT;
}
/* Configure Q6 core CBCR to auto-enable after reset sequence */
val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
/* De-assert the Q6 stop core signal */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
val, (val & BIT(0)) != 0, 1,
BOOT_STATUS_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n");
/* Reset the modem so that boot FSM is in reset state */
q6v5_reset_deassert(qproc);
return ret;
}
goto pbl_wait;
} else if (qproc->version == MSS_MSM8996 ||
qproc->version == MSS_MSM8998) {
@@ -515,13 +604,13 @@
/* BHS require xo cbcr to be enabled */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
val |= 0x1; val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
/* Read CLKOFF bit to go low indicating CLK is enabled */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
val, !(val & BIT(31)), 1, val, !(val & Q6SS_CBCR_CLKOFF), 1,
HALT_CHECK_MAX_LOOPS); Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
@@ -637,7 +726,6 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
struct regmap *halt_map,
u32 offset)
{
unsigned long timeout;
unsigned int val;
int ret;
@@ -650,14 +738,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
/* Wait for halt */ /* Wait for halt */
timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS); regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
for (;;) { val, 1000, HALT_ACK_TIMEOUT_US);
ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
if (ret || val || time_after(jiffies, timeout))
break;
msleep(1);
}
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (ret || !val)
@@ -667,6 +749,32 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
struct regmap *halt_map,
u32 offset)
{
unsigned int val;
int ret;
/* Check if we're already idle */
ret = regmap_read(halt_map, offset, &val);
if (!ret && (val & NAV_AXI_IDLE_BIT))
return;
/* Assert halt request */
regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
NAV_AXI_HALTREQ_BIT);
/* Wait for halt ack*/
regmap_read_poll_timeout(halt_map, offset, val,
(val & NAV_AXI_HALTACK_BIT),
5, NAV_HALT_ACK_TIMEOUT_US);
ret = regmap_read(halt_map, offset, &val);
if (ret || !(val & NAV_AXI_IDLE_BIT))
dev_err(qproc->dev, "port failed halt\n");
}
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
@@ -829,6 +937,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
if (qproc->has_halt_nav)
q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
reclaim_mba:
@@ -876,6 +987,9 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
if (qproc->has_halt_nav)
q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
if (qproc->version == MSS_MSM8996) {
/*
@@ -1253,6 +1367,47 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->halt_modem = args.args[1];
qproc->halt_nc = args.args[2];
if (qproc->has_halt_nav) {
struct platform_device *nav_pdev;
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,halt-nav-regs",
1, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
return -EINVAL;
}
nav_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!nav_pdev) {
dev_err(&pdev->dev, "failed to get mss clock device\n");
return -EPROBE_DEFER;
}
qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
if (!qproc->halt_nav_map) {
dev_err(&pdev->dev, "failed to get map from device\n");
return -EINVAL;
}
qproc->halt_nav = args.args[0];
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,halt-nav-regs",
1, 1, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
return -EINVAL;
}
qproc->conn_map = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(qproc->conn_map))
return PTR_ERR(qproc->conn_map);
qproc->conn_box = args.args[0];
}
return 0;
}
@@ -1327,7 +1482,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
return PTR_ERR(qproc->mss_restart);
}
if (qproc->has_alt_reset) { if (qproc->has_alt_reset || qproc->has_halt_nav) {
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
"pdc_reset");
if (IS_ERR(qproc->pdc_reset)) {
@@ -1426,6 +1581,7 @@ static int q6v5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qproc);
qproc->has_halt_nav = desc->has_halt_nav;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
@@ -1549,6 +1705,41 @@ static int q6v5_remove(struct platform_device *pdev)
return 0;
}
static const struct rproc_hexagon_res sc7180_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
NULL
},
.reset_clk_names = (char*[]){
"iface",
"bus",
"snoc_axi",
NULL
},
.active_clk_names = (char*[]){
"mnoc_axi",
"nav",
"mss_nav",
"mss_crypto",
NULL
},
.active_pd_names = (char*[]){
"load_state",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mx",
"mss",
NULL
},
.need_mem_protection = true,
.has_alt_reset = false,
.has_halt_nav = true,
.version = MSS_SC7180,
};
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
@@ -1580,6 +1771,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
.has_halt_nav = false,
.version = MSS_SDM845,
};
@@ -1594,7 +1786,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
"gpll0_mss", "gpll0_mss",
"mnoc_axi", "mnoc_axi",
"snoc_axi", "snoc_axi",
...@@ -1607,6 +1798,7 @@ static const struct rproc_hexagon_res msm8998_mss = { ...@@ -1607,6 +1798,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
}, },
.need_mem_protection = true, .need_mem_protection = true,
.has_alt_reset = false, .has_alt_reset = false,
.has_halt_nav = false,
.version = MSS_MSM8998, .version = MSS_MSM8998,
}; };
...@@ -1636,6 +1828,7 @@ static const struct rproc_hexagon_res msm8996_mss = { ...@@ -1636,6 +1828,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
}, },
.need_mem_protection = true, .need_mem_protection = true,
.has_alt_reset = false, .has_alt_reset = false,
.has_halt_nav = false,
.version = MSS_MSM8996, .version = MSS_MSM8996,
}; };
...@@ -1668,6 +1861,7 @@ static const struct rproc_hexagon_res msm8916_mss = { ...@@ -1668,6 +1861,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
}, },
.need_mem_protection = false, .need_mem_protection = false,
.has_alt_reset = false, .has_alt_reset = false,
.has_halt_nav = false,
.version = MSS_MSM8916, .version = MSS_MSM8916,
}; };
...@@ -1708,6 +1902,7 @@ static const struct rproc_hexagon_res msm8974_mss = { ...@@ -1708,6 +1902,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
}, },
.need_mem_protection = false, .need_mem_protection = false,
.has_alt_reset = false, .has_alt_reset = false,
.has_halt_nav = false,
.version = MSS_MSM8974, .version = MSS_MSM8974,
}; };
...@@ -1717,6 +1912,7 @@ static const struct of_device_id q6v5_of_match[] = { ...@@ -1717,6 +1912,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss}, { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ }, { },
}; };
......
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -31,6 +33,10 @@ struct adsp_data {
const char *firmware_name;
int pas_id;
bool has_aggre2_clk;
bool auto_boot;
char **active_pd_names;
char **proxy_pd_names;
const char *ssr_name;
const char *sysmon_name;
@@ -49,6 +55,12 @@ struct qcom_adsp {
struct regulator *cx_supply;
struct regulator *px_supply;
struct device *active_pds[1];
struct device *proxy_pds[3];
int active_pd_count;
int proxy_pd_count;
int pas_id;
int crash_reason_smem;
bool has_aggre2_clk;
@@ -67,6 +79,41 @@ struct qcom_adsp {
struct qcom_sysmon *sysmon;
};
static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int ret;
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
if (ret < 0)
goto unroll_pd_votes;
}
return 0;
unroll_pd_votes:
for (i--; i >= 0; i--) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
return ret;
};
static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
int i;
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], 0);
pm_runtime_put(pds[i]);
}
}
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -84,9 +131,17 @@ static int adsp_start(struct rproc *rproc)
qcom_q6v5_prepare(&adsp->q6v5);
ret = adsp_pds_enable(adsp, adsp->active_pds, adsp->active_pd_count);
if (ret < 0)
goto disable_irqs;
ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
if (ret < 0)
goto disable_active_pds;
ret = clk_prepare_enable(adsp->xo);
if (ret)
return ret; goto disable_proxy_pds;
ret = clk_prepare_enable(adsp->aggre2_clk);
if (ret)
@@ -124,6 +179,12 @@ static int adsp_start(struct rproc *rproc)
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
disable_proxy_pds:
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
disable_active_pds:
adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&adsp->q6v5);
return ret;
}
@@ -136,6 +197,7 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
@@ -152,6 +214,7 @@ static int adsp_stop(struct rproc *rproc)
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
handover = qcom_q6v5_unprepare(&adsp->q6v5);
if (handover)
qcom_pas_handover(&adsp->q6v5);
@@ -217,6 +280,59 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
return PTR_ERR_OR_ZERO(adsp->px_supply);
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
char **pd_names)
{
size_t num_pds = 0;
int ret;
int i;
if (!pd_names)
return 0;
/* Handle single power domain */
if (dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev);
return 1;
}
while (pd_names[num_pds])
num_pds++;
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
ret = PTR_ERR(devs[i]) ? : -ENODATA;
goto unroll_attach;
}
}
return num_pds;
unroll_attach:
for (i--; i >= 0; i--)
dev_pm_domain_detach(devs[i], false);
return ret;
}
static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
size_t pd_count)
{
struct device *dev = adsp->dev;
int i;
/* Handle single power domain */
if (dev->pm_domain && pd_count) {
pm_runtime_disable(dev);
return;
}
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
	struct device_node *node;
...@@ -273,6 +389,8 @@ static int adsp_probe(struct platform_device *pdev)
		return -ENOMEM;
	}
rproc->auto_boot = desc->auto_boot;
	adsp = (struct qcom_adsp *)rproc->priv;
	adsp->dev = &pdev->dev;
	adsp->rproc = rproc;
...@@ -292,10 +410,22 @@ static int adsp_probe(struct platform_device *pdev)
	if (ret)
		goto free_rproc;
ret = adsp_pds_attach(&pdev->dev, adsp->active_pds,
desc->active_pd_names);
if (ret < 0)
goto free_rproc;
adsp->active_pd_count = ret;
ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
desc->proxy_pd_names);
if (ret < 0)
goto detach_active_pds;
adsp->proxy_pd_count = ret;
	ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
			     qcom_pas_handover);
	if (ret)
		goto detach_proxy_pds;
	qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
	qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
...@@ -305,15 +435,19 @@ static int adsp_probe(struct platform_device *pdev)
					      desc->ssctl_id);
	if (IS_ERR(adsp->sysmon)) {
		ret = PTR_ERR(adsp->sysmon);
		goto detach_proxy_pds;
	}
	ret = rproc_add(rproc);
	if (ret)
		goto detach_proxy_pds;
	return 0;
detach_proxy_pds:
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
detach_active_pds:
adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count);
free_rproc:
	rproc_free(rproc);
...@@ -340,6 +474,41 @@ static const struct adsp_data adsp_resource_init = {
	.firmware_name = "adsp.mdt",
	.pas_id = 1,
	.has_aggre2_clk = false,
.auto_boot = true,
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.has_aggre2_clk = false,
.auto_boot = true,
.active_pd_names = (char*[]){
"load_state",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
};
static const struct adsp_data msm8998_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
.has_aggre2_clk = false,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.ssr_name = "lpass", .ssr_name = "lpass",
.sysmon_name = "adsp", .sysmon_name = "adsp",
.ssctl_id = 0x14, .ssctl_id = 0x14,
...@@ -350,16 +519,92 @@ static const struct adsp_data cdsp_resource_init = { ...@@ -350,16 +519,92 @@ static const struct adsp_data cdsp_resource_init = {
.firmware_name = "cdsp.mdt", .firmware_name = "cdsp.mdt",
.pas_id = 18, .pas_id = 18,
.has_aggre2_clk = false, .has_aggre2_clk = false,
.auto_boot = true,
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
static const struct adsp_data sm8150_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.has_aggre2_clk = false,
.auto_boot = true,
.active_pd_names = (char*[]){
"load_state",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
NULL
},
.ssr_name = "cdsp", .ssr_name = "cdsp",
.sysmon_name = "cdsp", .sysmon_name = "cdsp",
.ssctl_id = 0x17, .ssctl_id = 0x17,
}; };
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
.has_aggre2_clk = false,
.auto_boot = false,
.active_pd_names = (char*[]){
"load_state",
NULL
},
.proxy_pd_names = (char*[]){
"cx",
"mss",
NULL
},
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
};
static const struct adsp_data slpi_resource_init = {
	.crash_reason_smem = 424,
	.firmware_name = "slpi.mdt",
	.pas_id = 12,
	.has_aggre2_clk = true,
.auto_boot = true,
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
};
static const struct adsp_data sm8150_slpi_resource = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.has_aggre2_clk = false,
.auto_boot = true,
.active_pd_names = (char*[]){
"load_state",
NULL
},
.proxy_pd_names = (char*[]){
"lcx",
"lmx",
NULL
},
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
};
static const struct adsp_data msm8998_slpi_resource = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.has_aggre2_clk = true,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"ssc_cx",
NULL
},
.ssr_name = "dsps", .ssr_name = "dsps",
.sysmon_name = "slpi", .sysmon_name = "slpi",
.ssctl_id = 0x16, .ssctl_id = 0x16,
...@@ -369,6 +614,7 @@ static const struct adsp_data wcss_resource_init = { ...@@ -369,6 +614,7 @@ static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421, .crash_reason_smem = 421,
.firmware_name = "wcnss.mdt", .firmware_name = "wcnss.mdt",
.pas_id = 6, .pas_id = 6,
.auto_boot = true,
.ssr_name = "mpss", .ssr_name = "mpss",
.sysmon_name = "wcnss", .sysmon_name = "wcnss",
.ssctl_id = 0x12, .ssctl_id = 0x12,
...@@ -378,11 +624,17 @@ static const struct of_device_id adsp_of_match[] = { ...@@ -378,11 +624,17 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init}, { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8998_adsp_resource},
{ .compatible = "qcom,msm8998-slpi-pas", .data = &msm8998_slpi_resource},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init }, { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init }, { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init }, { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init}, { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init}, { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
	{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
...
...@@ -394,7 +394,7 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
		break;
	default:
		return -EINVAL;
	}
	sysmon->ssctl_version = svc->version;
...
...@@ -477,8 +477,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
	char name[16];
	/* make sure resource isn't truncated */
	if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
			avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}
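For reference, struct_size() (from <linux/overflow.h>) replaces the open-coded size calculation in the hunk above. A rough sketch of what it evaluates to, not the kernel's exact macro definition:

	/*
	 * struct_size(rsc, vring, rsc->num_of_vrings) computes
	 *
	 *     sizeof(*rsc) + rsc->num_of_vrings * sizeof(rsc->vring[0])
	 *
	 * but with overflow-checked arithmetic that saturates to SIZE_MAX,
	 * so an attacker-controlled num_of_vrings cannot wrap around and
	 * defeat the bounds check against 'avail' that follows.
	 */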
...@@ -2223,7 +2223,7 @@ static int __init remoteproc_init(void)
	return 0;
}
subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
...
...@@ -15,6 +15,15 @@ config RPMSG_CHAR
	  in /dev. They make it possible for user-space programs to send and
	  receive rpmsg packets.
config RPMSG_MTK_SCP
tristate "MediaTek SCP"
depends on MTK_SCP
select RPMSG
help
	  Say y here to enable support for providing communication channels
	  to remote processors on MediaTek platforms.
	  This uses IPI and IPC to communicate with the remote processors.
config RPMSG_QCOM_GLINK_NATIVE
	tristate
	select RPMSG
...
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RPMSG)		+= rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR)	+= rpmsg_char.o
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
...
// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2019 Google LLC.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/rpmsg/mtk_rpmsg.h>
#include <linux/workqueue.h>
#include "rpmsg_internal.h"
struct mtk_rpmsg_rproc_subdev {
struct platform_device *pdev;
struct mtk_rpmsg_info *info;
struct rpmsg_endpoint *ns_ept;
struct rproc_subdev subdev;
struct work_struct register_work;
struct list_head channels;
struct mutex channels_lock;
};
#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
struct mtk_rpmsg_channel_info {
struct rpmsg_channel_info info;
bool registered;
struct list_head list;
};
/**
* struct rpmsg_ns_msg - dynamic name service announcement message
* @name: name of remote service that is published
* @addr: address of remote service that is published
*
* This message is sent across to publish a new service. When we receive these
* messages, an appropriate rpmsg channel (i.e. device) is created. In turn, the
* ->probe() handler of the appropriate rpmsg driver will be invoked
* (if/as-soon-as one is registered).
*/
struct rpmsg_ns_msg {
char name[RPMSG_NAME_SIZE];
u32 addr;
} __packed;
struct mtk_rpmsg_device {
struct rpmsg_device rpdev;
struct mtk_rpmsg_rproc_subdev *mtk_subdev;
};
struct mtk_rpmsg_endpoint {
struct rpmsg_endpoint ept;
struct mtk_rpmsg_rproc_subdev *mtk_subdev;
};
#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)
static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
static void __mtk_ept_release(struct kref *kref)
{
struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
refcount);
kfree(to_mtk_rpmsg_endpoint(ept));
}
static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
{
struct mtk_rpmsg_endpoint *mept = priv;
struct rpmsg_endpoint *ept = &mept->ept;
int ret;
ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
if (ret)
dev_warn(&ept->rpdev->dev, "rpmsg handler return error = %d",
ret);
}
static struct rpmsg_endpoint *
__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
u32 id)
{
struct mtk_rpmsg_endpoint *mept;
struct rpmsg_endpoint *ept;
struct platform_device *pdev = mtk_subdev->pdev;
int ret;
mept = kzalloc(sizeof(*mept), GFP_KERNEL);
if (!mept)
return NULL;
mept->mtk_subdev = mtk_subdev;
ept = &mept->ept;
kref_init(&ept->refcount);
ept->rpdev = rpdev;
ept->cb = cb;
ept->priv = priv;
ept->ops = &mtk_rpmsg_endpoint_ops;
ept->addr = id;
ret = mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
mept);
if (ret) {
dev_err(&pdev->dev, "IPI register failed, id = %d", id);
kref_put(&ept->refcount, __mtk_ept_release);
return NULL;
}
return ept;
}
static struct rpmsg_endpoint *
mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev =
to_mtk_rpmsg_device(rpdev)->mtk_subdev;
return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
}
static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev =
to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
kref_put(&ept->refcount, __mtk_ept_release);
}
static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev =
to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
len, 0);
}
static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev =
to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
/*
* TODO: This is currently the same as mtk_rpmsg_send() and waits until
* the SCP has received the last command.
*/
return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
len, 0);
}
static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
.destroy_ept = mtk_rpmsg_destroy_ept,
.send = mtk_rpmsg_send,
.trysend = mtk_rpmsg_trysend,
};
static void mtk_rpmsg_release_device(struct device *dev)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);
kfree(mdev);
}
static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
.create_ept = mtk_rpmsg_create_ept,
};
static struct device_node *
mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
{
struct device_node *child;
const char *name;
int ret;
for_each_available_child_of_node(node, child) {
ret = of_property_read_string(child, "mtk,rpmsg-name", &name);
if (ret)
continue;
if (strcmp(name, channel) == 0)
return child;
}
return NULL;
}
static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
struct rpmsg_channel_info *info)
{
struct rpmsg_device *rpdev;
struct mtk_rpmsg_device *mdev;
struct platform_device *pdev = mtk_subdev->pdev;
int ret;
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
mdev->mtk_subdev = mtk_subdev;
rpdev = &mdev->rpdev;
rpdev->ops = &mtk_rpmsg_device_ops;
rpdev->src = info->src;
rpdev->dst = info->dst;
strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);
rpdev->dev.of_node =
mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
rpdev->dev.parent = &pdev->dev;
rpdev->dev.release = mtk_rpmsg_release_device;
ret = rpmsg_register_device(rpdev);
if (ret) {
kfree(mdev);
return ret;
}
return 0;
}
static void mtk_register_device_work_function(struct work_struct *register_work)
{
struct mtk_rpmsg_rproc_subdev *subdev = container_of(
register_work, struct mtk_rpmsg_rproc_subdev, register_work);
struct platform_device *pdev = subdev->pdev;
struct mtk_rpmsg_channel_info *info;
int ret;
mutex_lock(&subdev->channels_lock);
list_for_each_entry(info, &subdev->channels, list) {
if (info->registered)
continue;
ret = mtk_rpmsg_register_device(subdev, &info->info);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
}
info->registered = true;
}
mutex_unlock(&subdev->channels_lock);
}
static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
char *name, u32 addr)
{
struct mtk_rpmsg_channel_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
strscpy(info->info.name, name, RPMSG_NAME_SIZE);
info->info.src = addr;
info->info.dst = RPMSG_ADDR_ANY;
mutex_lock(&mtk_subdev->channels_lock);
list_add(&info->list, &mtk_subdev->channels);
mutex_unlock(&mtk_subdev->channels_lock);
schedule_work(&mtk_subdev->register_work);
return 0;
}
static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
void *priv, u32 src)
{
struct rpmsg_ns_msg *msg = data;
struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
struct device *dev = &mtk_subdev->pdev->dev;
int ret;
if (len != sizeof(*msg)) {
dev_err(dev, "malformed ns msg (%d)\n", len);
return -EINVAL;
}
/*
* the name service ept does _not_ belong to a real rpmsg channel,
* and is handled by the rpmsg bus itself.
* for sanity reasons, make sure a valid rpdev has _not_ sneaked
* in somehow.
*/
if (rpdev) {
dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
return -EINVAL;
}
/* don't trust the remote processor for null terminating the name */
msg->name[RPMSG_NAME_SIZE - 1] = '\0';
dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);
ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
if (ret) {
dev_err(dev, "create rpmsg device failed\n");
return ret;
}
return 0;
}
static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
/* a dedicated endpoint handles the name service msgs */
if (mtk_subdev->info->ns_ipi_id >= 0) {
mtk_subdev->ns_ept =
__mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
mtk_subdev,
mtk_subdev->info->ns_ipi_id);
if (!mtk_subdev->ns_ept) {
dev_err(&mtk_subdev->pdev->dev,
"failed to create name service endpoint\n");
return -ENOMEM;
}
}
return 0;
}
static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
if (mtk_subdev->ns_ept) {
mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
mtk_subdev->ns_ept = NULL;
}
}
static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
{
struct mtk_rpmsg_channel_info *info, *next;
struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
struct device *dev = &mtk_subdev->pdev->dev;
/*
* Destroy the name service endpoint here, to avoid a new channel being
* created after the rpmsg_unregister_device() loop below.
*/
if (mtk_subdev->ns_ept) {
mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
mtk_subdev->ns_ept = NULL;
}
cancel_work_sync(&mtk_subdev->register_work);
mutex_lock(&mtk_subdev->channels_lock);
list_for_each_entry(info, &mtk_subdev->channels, list) {
if (!info->registered)
continue;
if (rpmsg_unregister_device(dev, &info->info)) {
dev_warn(
dev,
"rpmsg_unregister_device failed for %s.%d.%d\n",
info->info.name, info->info.src,
info->info.dst);
}
}
list_for_each_entry_safe(info, next,
&mtk_subdev->channels, list) {
list_del(&info->list);
kfree(info);
}
mutex_unlock(&mtk_subdev->channels_lock);
}
struct rproc_subdev *
mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
struct mtk_rpmsg_info *info)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev;
mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
if (!mtk_subdev)
return NULL;
mtk_subdev->pdev = pdev;
mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
mtk_subdev->subdev.stop = mtk_rpmsg_stop;
mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;
mtk_subdev->info = info;
INIT_LIST_HEAD(&mtk_subdev->channels);
INIT_WORK(&mtk_subdev->register_work,
mtk_register_device_work_function);
mutex_init(&mtk_subdev->channels_lock);
return &mtk_subdev->subdev;
}
EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
kfree(mtk_subdev);
}
EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek scp rpmsg driver");
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 MediaTek Inc.
*/
#ifndef _MTK_SCP_H
#define _MTK_SCP_H
#include <linux/platform_device.h>
typedef void (*scp_ipi_handler_t) (void *data,
unsigned int len,
void *priv);
struct mtk_scp;
/**
* enum scp_ipi_id - the id of inter-processor interrupt
*
* @SCP_IPI_INIT: The interrupt from SCP notifies the kernel that SCP
*                initialization has completed.
*                SCP_IPI_INIT is sent from SCP when the firmware is
*                loaded; the AP doesn't need to send an SCP_IPI_INIT
*                command to SCP.
*                For the other IPIs below, the AP should send the
*                request to SCP to trigger the interrupt.
* @SCP_IPI_MAX: The maximum IPI number
*/
enum scp_ipi_id {
SCP_IPI_INIT = 0,
SCP_IPI_VDEC_H264,
SCP_IPI_VDEC_VP8,
SCP_IPI_VDEC_VP9,
SCP_IPI_VENC_H264,
SCP_IPI_VENC_VP8,
SCP_IPI_MDP_INIT,
SCP_IPI_MDP_DEINIT,
SCP_IPI_MDP_FRAME,
SCP_IPI_DIP,
SCP_IPI_ISP_CMD,
SCP_IPI_ISP_FRAME,
SCP_IPI_FD_CMD,
SCP_IPI_CROS_HOST_CMD,
SCP_IPI_NS_SERVICE = 0xFF,
SCP_IPI_MAX = 0x100,
};
struct mtk_scp *scp_get(struct platform_device *pdev);
void scp_put(struct mtk_scp *scp);
struct device *scp_get_device(struct mtk_scp *scp);
struct rproc *scp_get_rproc(struct mtk_scp *scp);
int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
void *priv);
void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
unsigned int wait);
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp);
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr);
#endif /* _MTK_SCP_H */
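A minimal sketch of how a consumer might use the IPI interface declared above. The demo_* names are placeholders, error handling is abbreviated, how scp_get() locates the SCP instance (typically via a phandle in the consumer's DT node) is left to the SCP driver, and the include path is assumed to be the one this header is installed under:

	#include <linux/platform_device.h>
	#include <linux/printk.h>
	#include <linux/remoteproc/mtk_scp.h>	/* assumed install path */

	static void demo_ipi_handler(void *data, unsigned int len, void *priv)
	{
		/* Runs when the SCP fires the registered IPI back at the AP. */
		pr_info("SCP replied with %u bytes\n", len);
	}

	static int demo_bind_scp(struct platform_device *pdev)
	{
		struct mtk_scp *scp;
		u8 msg[4] = { 0 };
		int ret;

		scp = scp_get(pdev);
		if (!scp)
			return -EPROBE_DEFER;

		ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264,
				       demo_ipi_handler, NULL);
		if (ret)
			goto out_put;

		/* A non-zero 'wait' asks scp_ipi_send() to wait for the SCP. */
		ret = scp_ipi_send(scp, SCP_IPI_VDEC_H264, msg, sizeof(msg), 100);

		scp_ipi_unregister(scp, SCP_IPI_VDEC_H264);
	out_put:
		scp_put(scp);
		return ret;
	}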
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 Google LLC.
*/
#ifndef __LINUX_RPMSG_MTK_RPMSG_H
#define __LINUX_RPMSG_MTK_RPMSG_H
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
typedef void (*ipi_handler_t)(void *data, unsigned int len, void *priv);
/*
* struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
* @register_ipi: register IPI handler for an IPI id.
* @unregister_ipi: unregister IPI handler for a registered IPI id.
* @send_ipi: send IPI to an IPI id. wait is the timeout (in msecs) to wait
* for a response, or 0 if there is no timeout.
* @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't
* supported.
*/
struct mtk_rpmsg_info {
int (*register_ipi)(struct platform_device *pdev, u32 id,
ipi_handler_t handler, void *priv);
void (*unregister_ipi)(struct platform_device *pdev, u32 id);
int (*send_ipi)(struct platform_device *pdev, u32 id,
void *buf, unsigned int len, unsigned int wait);
int ns_ipi_id;
};
struct rproc_subdev *
mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
struct mtk_rpmsg_info *info);
void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev);
#endif
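Finally, a sketch of how a remoteproc driver is expected to hook this subdevice up (the mt8183 SCP driver in this series does the equivalent). All demo_* identifiers and the stub callback bodies are placeholders for the SoC's real IPI plumbing:

	#include <linux/platform_device.h>
	#include <linux/remoteproc.h>
	#include <linux/rpmsg/mtk_rpmsg.h>

	static int demo_register_ipi(struct platform_device *pdev, u32 id,
				     ipi_handler_t handler, void *priv)
	{
		return 0;	/* hook the handler into the SoC IPI layer here */
	}

	static void demo_unregister_ipi(struct platform_device *pdev, u32 id)
	{
	}

	static int demo_send_ipi(struct platform_device *pdev, u32 id,
				 void *buf, unsigned int len, unsigned int wait)
	{
		return 0;	/* push the message to the co-processor here */
	}

	static struct mtk_rpmsg_info demo_rpmsg_info = {
		.register_ipi	= demo_register_ipi,
		.unregister_ipi	= demo_unregister_ipi,
		.send_ipi	= demo_send_ipi,
		.ns_ipi_id	= 0xFF,		/* e.g. SCP_IPI_NS_SERVICE */
	};

	static int demo_attach_rpmsg_subdev(struct rproc *rproc,
					    struct platform_device *pdev)
	{
		struct rproc_subdev *subdev;

		subdev = mtk_rpmsg_create_rproc_subdev(pdev, &demo_rpmsg_info);
		if (!subdev)
			return -ENOMEM;

		/* The core calls subdev->prepare/stop/unprepare around boot. */
		rproc_add_subdev(rproc, subdev);
		return 0;
	}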