Commit d18292dc authored by Linus Torvalds

Merge tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM driver updates from Arnd Bergmann:
 "These are the usual updates for SoC specific device drivers and
  related subsystems that don't have their own top-level maintainers:

   - ARM SCMI/SCPI updates to allow pluggable transport layers

   - TEE subsystem cleanups

   - A new driver for the Amlogic secure power domain controller

   - Various driver updates for the NXP Layerscape DPAA2, NXP i.MX SCU
     and TI OMAP2+ sysc drivers.

   - Qualcomm SoC driver updates, including a new library module for
     "protection domain" notifications

   - Lots of smaller bugfixes and cleanups in other drivers"

* tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (70 commits)
  soc: fsl: qe: fix sparse warnings for ucc_slow.c
  soc: fsl: qe: ucc_slow: remove 0 assignment for kzalloc'ed structure
  soc: fsl: qe: fix sparse warnings for ucc_fast.c
  soc: fsl: qe: fix sparse warnings for qe_ic.c
  soc: fsl: qe: fix sparse warnings for ucc.c
  soc: fsl: qe: fix sparse warning for qe_common.c
  soc: fsl: qe: fix sparse warnings for qe.c
  soc: qcom: Fix QCOM_APR dependencies
  soc: qcom: pdr: Avoid uninitialized use of found in pdr_indication_cb
  soc: imx: drop COMPILE_TEST for IMX_SCU_SOC
  firmware: imx: add COMPILE_TEST for IMX_SCU driver
  soc: imx: gpc: fix power up sequencing
  soc: imx: increase build coverage for imx8m soc driver
  soc: qcom: apr: Add avs/audio tracking functionality
  dt-bindings: soc: qcom: apr: Add protection domain bindings
  soc: qcom: Introduce Protection Domain Restart helpers
  devicetree: bindings: firmware: add ipq806x to qcom_scm
  memory: tegra: Correct debugfs clk rate-range on Tegra124
  memory: tegra: Correct debugfs clk rate-range on Tegra30
  memory: tegra: Correct debugfs clk rate-range on Tegra20
  ...
parents 0e8fb69f cedb414a
......@@ -38,6 +38,7 @@ Required standard properties:
"ti,sysc-dra7-mcasp"
"ti,sysc-usb-host-fs"
"ti,sysc-dra7-mcan"
"ti,sysc-pruss"
- reg shall have register areas implemented for the interconnect
target module in question such as revision, sysc and syss
......
......@@ -10,6 +10,7 @@ Required properties:
* "qcom,scm-apq8064"
* "qcom,scm-apq8084"
* "qcom,scm-ipq4019"
* "qcom,scm-ipq806x"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
* "qcom,scm-msm8960"
......
# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
# Copyright (c) 2019 Amlogic, Inc
# Author: Jianxin Pan <jianxin.pan@amlogic.com>
%YAML 1.2
---
$id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson Secure Power Domains
maintainers:
  - Jianxin Pan <jianxin.pan@amlogic.com>

description: |+
  Secure Power Domains used in Meson A1/C1 SoCs, and should be the child node
  of secure-monitor.

properties:
  compatible:
    enum:
      - amlogic,meson-a1-pwrc

  "#power-domain-cells":
    const: 1

required:
  - compatible
  - "#power-domain-cells"

examples:
  - |
    secure-monitor {
      compatible = "amlogic,meson-gxbb-sm";

      pwrc: power-controller {
        compatible = "amlogic,meson-a1-pwrc";
        #power-domain-cells = <1>;
      };
    };
......@@ -45,6 +45,18 @@ by the individual bindings for the specific service
12 - Ultrasound stream manager.
13 - Listen stream manager.
- qcom,protection-domain
Usage: optional
Value type: <stringlist>
Definition: Must list the protection domain service name and path
that the particular apr service has a dependency on.
Possible values are :
"avs/audio", "msm/adsp/audio_pd".
"kernel/elf_loader", "msm/modem/wlan_pd".
"tms/servreg", "msm/adsp/audio_pd".
"tms/servreg", "msm/modem/wlan_pd".
"tms/servreg", "msm/slpi/sensor_pd".
= EXAMPLE
The following example represents a QDSP based sound card on a MSM8996 device
which uses apr as communication between Apps and QDSP.
......@@ -82,3 +94,41 @@ which uses apr as communication between Apps and QDSP.
...
};
};
= EXAMPLE 2
The following example represents a QDSP based sound card with protection domain
dependencies specified. Here some of the apr services depend on services running
in protection domains hosted on the ADSP/SLPI remote processors, while others
have no such dependency.
apr {
compatible = "qcom,apr-v2";
qcom,glink-channels = "apr_audio_svc";
qcom,apr-domain = <APR_DOMAIN_ADSP>;
q6core {
compatible = "qcom,q6core";
reg = <APR_SVC_ADSP_CORE>;
};
q6afe: q6afe {
compatible = "qcom,q6afe";
reg = <APR_SVC_AFE>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
...
};
q6asm: q6asm {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd";
...
};
q6adm: q6adm {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
...
};
};
......@@ -397,10 +397,16 @@ static int ti_sysc_shutdown_module(struct device *dev,
return omap_hwmod_shutdown(cookie->data);
}
static bool ti_sysc_soc_type_gp(void)
{
return omap_type() == OMAP2_DEVICE_TYPE_GP;
}
static struct of_dev_auxdata omap_auxdata_lookup[];
static struct ti_sysc_platform_data ti_sysc_pdata = {
.auxdata = omap_auxdata_lookup,
.soc_type_gp = ti_sysc_soc_type_gp,
.init_clockdomain = ti_sysc_clkdm_init,
.clkdm_deny_idle = ti_sysc_clkdm_deny_idle,
.clkdm_allow_idle = ti_sysc_clkdm_allow_idle,
......
......@@ -357,6 +357,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
return 0;
}
/*
* Released firmware describes the IO port max address as 0x3fff, which is
* the max host bus address. Fixup to a proper range. This will probably
* never be fixed in firmware.
*/
static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
struct resource *r)
{
if (r->end != 0x3fff)
return;
if (r->start == 0xe4)
r->end = 0xe4 + 0x04 - 1;
else if (r->start == 0x2f8)
r->end = 0x2f8 + 0x08 - 1;
else
dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
r);
}
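As a worked example of the fixup above (demo_fixup and its caller are hypothetical, for illustration only): a UART child that firmware reports as ending at 0x3fff is trimmed to the 8-byte range it actually occupies.
/* Hypothetical illustration of hisi_lpc_acpi_fixup_child_resource() */
static void demo_fixup(struct device *hostdev)
{
        struct resource r = {
                .start = 0x2f8,
                .end = 0x3fff,  /* bogus max host bus address from firmware */
                .flags = IORESOURCE_IO,
        };

        hisi_lpc_acpi_fixup_child_resource(hostdev, &r);
        /* r.end is now 0x2f8 + 0x08 - 1 == 0x2ff */
}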
/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
* @child: the device node to be updated the I/O resource
......@@ -418,8 +438,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
return -ENOMEM;
}
count = 0;
list_for_each_entry(rentry, &resource_list, node)
resources[count++] = *rentry->res;
list_for_each_entry(rentry, &resource_list, node) {
resources[count] = *rentry->res;
hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
count++;
}
acpi_dev_free_resource_list(&resource_list);
......
# SPDX-License-Identifier: GPL-2.0-only
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
scmi-transport-y = mailbox.o shmem.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
......@@ -33,8 +33,8 @@ enum scmi_common_cmd {
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
* @major_version: Major version of the ABI that firmware supports
* @minor_version: Minor version of the ABI that firmware supports
* @major_version: Major version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
......@@ -47,6 +47,19 @@ struct scmi_msg_resp_prot_version {
__le16 major_version;
};
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
......@@ -67,6 +80,33 @@ struct scmi_msg_hdr {
bool poll_completion;
};
/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*
* Return: 32-bit packed message header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
return FIELD_PREP(MSG_ID_MASK, hdr->id) |
FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
/**
* unpack_scmi_header() - unpacks and records message and protocol id
*
* @msg_hdr: 32-bit packed message header sent from the platform
* @hdr: pointer to header to fetch message and protocol id.
*/
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
hdr->id = MSG_XTRACT_ID(msg_hdr);
hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}
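As a worked example of the packing above (demo_pack_example is hypothetical; the command and protocol ids are values assumed from the SCMI specification):
static inline u32 demo_pack_example(void)
{
        struct scmi_msg_hdr hdr = {
                .id = 0x7,              /* e.g. PERF LEVEL_SET, bits [7:0] */
                .protocol_id = 0x13,    /* e.g. PERFORMANCE protocol, bits [17:10] */
                .seq = 5,               /* token, bits [27:18] */
        };

        /* (5 << 18) | (0x13 << 10) | 0x7 == 0x00144c07 */
        return pack_scmi_header(&hdr);
}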
/**
* struct scmi_msg - Message(Tx/Rx) structure
*
......@@ -88,7 +128,7 @@ struct scmi_msg {
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
* @async: pointer to delayed response message received event completion
* @async_done: pointer to delayed response message received event completion
*/
struct scmi_xfer {
int transfer_id;
......@@ -113,3 +153,74 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp);
int scmi_base_protocol_init(struct scmi_handle *h);
/* SCMI Transport */
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
*
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
struct device *dev;
struct scmi_handle *handle;
void *transport_info;
};
/**
* struct scmi_transport_ops - Structure representing a SCMI transport ops
*
* @chan_available: Callback to check if channel is available or not
* @chan_setup: Callback to allocate and setup a channel
* @chan_free: Callback to free a channel
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
bool (*chan_available)(struct device *dev, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
/**
* struct scmi_desc - Description of SoC integration
*
* @ops: Pointer to the transport specific ops structure
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msg: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct scmi_desc {
struct scmi_transport_ops *ops;
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
};
extern const struct scmi_desc scmi_mailbox_desc;
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
struct scmi_shared_mem;
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Mailbox Transport
* driver.
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include "common.h"
/**
* struct scmi_mailbox - Structure representing a SCMI mailbox transport
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
*/
struct scmi_mailbox {
struct mbox_client cl;
struct mbox_chan *chan;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
shmem_tx_prepare(smbox->shmem, m);
}
static void rx_callback(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
}
static bool mailbox_chan_available(struct device *dev, int idx)
{
return !of_parse_phandle_with_args(dev->of_node, "mboxes",
"#mbox-cells", idx, NULL);
}
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
const char *desc = tx ? "Tx" : "Rx";
struct device *cdev = cinfo->dev;
struct scmi_mailbox *smbox;
struct device_node *shmem;
int ret, idx = tx ? 0 : 1;
struct mbox_client *cl;
resource_size_t size;
struct resource res;
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
return ret;
}
size = resource_size(&res);
smbox->shmem = devm_ioremap(dev, res.start, size);
if (!smbox->shmem) {
dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
return -EADDRNOTAVAIL;
}
cl = &smbox->cl;
cl->dev = cdev;
cl->tx_prepare = tx ? tx_prepare : NULL;
cl->rx_callback = rx_callback;
cl->tx_block = false;
cl->knows_txdone = tx;
smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
if (IS_ERR(smbox->chan)) {
ret = PTR_ERR(smbox->chan);
if (ret != -EPROBE_DEFER)
dev_err(cdev, "failed to request SCMI %s mailbox\n",
tx ? "Tx" : "Rx");
return ret;
}
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
return 0;
}
static int mailbox_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_mailbox *smbox = cinfo->transport_info;
if (!IS_ERR(smbox->chan)) {
mbox_free_channel(smbox->chan);
cinfo->transport_info = NULL;
smbox->chan = NULL;
smbox->cinfo = NULL;
}
scmi_free_channel(cinfo, data, id);
return 0;
}
static int mailbox_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
int ret;
ret = mbox_send_message(smbox->chan, xfer);
/* mbox_send_message returns non-negative value on success, so reset */
if (ret > 0)
ret = 0;
return ret;
}
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(smbox->chan, ret);
}
static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
shmem_fetch_response(smbox->shmem, xfer);
}
static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
return shmem_poll_done(smbox->shmem, xfer);
}
static struct scmi_transport_ops scmi_mailbox_ops = {
.chan_available = mailbox_chan_available,
.chan_setup = mailbox_chan_setup,
.chan_free = mailbox_chan_free,
.send_message = mailbox_send_message,
.mark_txdone = mailbox_mark_txdone,
.fetch_response = mailbox_fetch_response,
.poll_done = mailbox_poll_done,
};
const struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
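driver.c, which is not shown in this diff, is what binds this descriptor to the device tree; a minimal sketch of that association, assuming the standard "arm,scmi" compatible string:
/* Sketch only: the real match table lives in the elided driver.c. */
static const struct of_device_id scmi_of_match[] = {
        { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
        { /* sentinel */ },
};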
......@@ -89,7 +89,7 @@ struct scmi_msg_resp_perf_describe_levels {
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
} opp[0];
} opp[];
};
struct scmi_perf_get_fc_info {
......
// SPDX-License-Identifier: GPL-2.0
/*
* For transport using shared mem structure.
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>
#include "common.h"
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
* format only.
*/
struct scmi_shared_mem {
__le32 reserved;
__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
__le32 reserved1[2];
__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
__le32 length;
__le32 msg_header;
u8 msg_payload[];
};
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
/*
* Ideally the channel must be free by now, unless the OS timed out the
* last request and the platform is still processing it. Wait until the
* platform releases the shared memory, otherwise we may end up
* overwriting its response with a new message payload or vice versa.
*/
spin_until_cond(ioread32(&shmem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */
iowrite32(0x0, &shmem->channel_status);
iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
&shmem->flags);
iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
if (xfer->tx.buf)
memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
return ioread32(&shmem->msg_header);
}
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
xfer->hdr.status = ioread32(shmem->msg_payload);
/* Skip the length of header and status in shmem area i.e. 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len,
ioread32(&shmem->length) - 8);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
u16 xfer_id;
xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
if (xfer->hdr.seq != xfer_id)
return false;
return ioread32(&shmem->channel_status) &
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
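The helpers above are only reached through the transport ops; the following condensed sketch (demo_do_xfer_polled is illustrative, not the real core code) shows how a polled transfer is expected to flow through them. The real core bounds the busy-wait with a ktime-based timeout and converts the SCMI status word into a Linux errno.
static int demo_do_xfer_polled(const struct scmi_desc *desc,
                               struct scmi_chan_info *cinfo,
                               struct scmi_xfer *xfer)
{
        int ret;

        xfer->hdr.poll_completion = true;

        ret = desc->ops->send_message(cinfo, xfer);     /* shmem_tx_prepare() */
        if (ret)
                return ret;

        /* unbounded here for brevity; real code polls with a timeout */
        spin_until_cond(desc->ops->poll_done(cinfo, xfer));

        desc->ops->fetch_response(cinfo, xfer);         /* shmem_fetch_response() */
        desc->ops->mark_txdone(cinfo, 0);

        return 0;
}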
......@@ -262,12 +262,12 @@ struct scpi_drvinfo {
struct scpi_shared_mem {
__le32 command;
__le32 status;
u8 payload[0];
u8 payload[];
} __packed;
struct legacy_scpi_shared_mem {
__le32 status;
u8 payload[0];
u8 payload[];
} __packed;
struct scp_capabilities {
......
......@@ -93,7 +93,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
{ "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
{ "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
/* CONN SS */
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
......@@ -109,6 +109,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
{ "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
......@@ -116,7 +117,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
{ "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
{ "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
{ "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
{ "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
{ "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
{ "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
......@@ -158,6 +165,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
/* CM40 SS */
{ "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
{ "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
......
......@@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = {
CMD(SM_EFUSE_WRITE, 0x82000031),
CMD(SM_EFUSE_USER_MAX, 0x82000033),
CMD(SM_GET_CHIP_ID, 0x82000044),
CMD(SM_A1_PWRC_SET, 0x82000093),
CMD(SM_A1_PWRC_GET, 0x82000095),
{ /* sentinel */ },
},
};
......
......@@ -7,7 +7,7 @@ config TEGRA_IVC
help
IVC (Inter-VM Communication) protocol is part of the IPC
(Inter Processor Communication) framework on Tegra. It maintains the
data and the different commuication channels in SysRAM or RAM and
data and the different communication channels in SysRAM or RAM and
keeps the content in synchronization between host CPU and remote
processors.
......
......@@ -1348,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data)
return dev == child;
}
struct dss_component_match_data {
struct device *dev;
struct component_match **match;
};
static int dss_add_child_component(struct device *dev, void *data)
{
struct component_match **match = data;
struct dss_component_match_data *cmatch = data;
struct component_match **match = cmatch->match;
/*
* HACK
......@@ -1361,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data)
if (strstr(dev_name(dev), "rfbi"))
return 0;
component_match_add(dev->parent, match, dss_component_compare, dev);
/*
* Handle possible interconnect target modules defined within the DSS.
* The DSS components can be children of an interconnect target module
* after the device tree has been updated for the module data.
* See also omapdss_boot_init() for compatible fixup.
*/
if (strstr(dev_name(dev), "target-module"))
return device_for_each_child(dev, cmatch,
dss_add_child_component);
component_match_add(cmatch->dev, match, dss_component_compare, dev);
return 0;
}
......@@ -1404,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss)
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
struct dss_component_match_data cmatch;
struct component_match *match = NULL;
struct resource *dss_mem;
struct dss_device *dss;
......@@ -1481,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev)
omapdss_gather_components(&pdev->dev);
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
cmatch.dev = &pdev->dev;
cmatch.match = &match;
device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
......
......@@ -178,9 +178,24 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
{},
};
static void __init omapdss_find_children(struct device_node *np)
{
struct device_node *child;
for_each_available_child_of_node(np, child) {
if (!of_find_property(child, "compatible", NULL))
continue;
omapdss_walk_device(child, true);
if (of_device_is_compatible(child, "ti,sysc"))
omapdss_find_children(child);
}
}
static int __init omapdss_boot_init(void)
{
struct device_node *dss, *child;
struct device_node *dss;
INIT_LIST_HEAD(&dss_conv_list);
......@@ -190,13 +205,7 @@ static int __init omapdss_boot_init(void)
goto put_node;
omapdss_walk_device(dss, true);
for_each_available_child_of_node(dss, child) {
if (!of_find_property(child, "compatible", NULL))
continue;
omapdss_walk_device(child, true);
}
omapdss_find_children(dss);
while (!list_empty(&dss_conv_list)) {
struct dss_conv_node *n;
......
......@@ -1158,6 +1158,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
if (!emc->num_timings) {
emc->debugfs.min_rate = clk_get_rate(emc->clk);
emc->debugfs.max_rate = emc->debugfs.min_rate;
}
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
......
......@@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
if (!emc->num_timings) {
emc->debugfs.min_rate = clk_get_rate(emc->clk);
emc->debugfs.max_rate = emc->debugfs.min_rate;
}
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
......
......@@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
if (!emc->num_timings) {
emc->debugfs.min_rate = clk_get_rate(emc->clk);
emc->debugfs.max_rate = emc->debugfs.min_rate;
}
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
......
......@@ -11,7 +11,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
......
......@@ -48,6 +48,19 @@ config MESON_EE_PM_DOMAINS
Say yes to expose Amlogic Meson Everything-Else Power Domains as
Generic Power Domains.
config MESON_SECURE_PM_DOMAINS
bool "Amlogic Meson Secure Power Domains driver"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
depends on PM && OF
depends on HAVE_ARM_SMCCC
default ARCH_MESON
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
help
Support for the power controller on Amlogic A1/C1 series.
Say yes to expose Amlogic Meson Secure Power Domains as Generic
Power Domains.
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on ARCH_MESON || COMPILE_TEST
......
......@@ -5,3 +5,4 @@ obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/meson-a1-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
#define PWRC_ON 1
#define PWRC_OFF 0
struct meson_secure_pwrc_domain {
struct generic_pm_domain base;
unsigned int index;
struct meson_secure_pwrc *pwrc;
};
struct meson_secure_pwrc {
struct meson_secure_pwrc_domain *domains;
struct genpd_onecell_data xlate;
struct meson_sm_firmware *fw;
};
struct meson_secure_pwrc_domain_desc {
unsigned int index;
unsigned int flags;
char *name;
bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
};
struct meson_secure_pwrc_domain_data {
unsigned int count;
struct meson_secure_pwrc_domain_desc *domains;
};
static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
{
int is_off = 1;
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
pwrc_domain->index, 0, 0, 0, 0) < 0)
pr_err("failed to get power domain status\n");
return is_off;
}
static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
{
int ret = 0;
struct meson_secure_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_secure_pwrc_domain, base);
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
pr_err("failed to set power domain off\n");
ret = -EINVAL;
}
return ret;
}
static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
{
int ret = 0;
struct meson_secure_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_secure_pwrc_domain, base);
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
pr_err("failed to set power domain on\n");
ret = -EINVAL;
}
return ret;
}
#define SEC_PD(__name, __flag) \
[PWRC_##__name##_ID] = \
{ \
.name = #__name, \
.index = PWRC_##__name##_ID, \
.is_off = pwrc_secure_is_off, \
.flags = __flag, \
}
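For reference, an entry such as SEC_PD(UART, GENPD_FLAG_ALWAYS_ON) in the a1_pwrc_domains table below expands to:
[PWRC_UART_ID] = {
        .name = "UART",
        .index = PWRC_UART_ID,
        .is_off = pwrc_secure_is_off,
        .flags = GENPD_FLAG_ALWAYS_ON,
},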
static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
SEC_PD(DSPA, 0),
SEC_PD(DSPB, 0),
/* UART should keep working in ATF after suspend and before resume */
SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
/* DMC is for DDR PHY ana/dig and DMC, and should be always on */
SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
SEC_PD(I2C, 0),
SEC_PD(PSRAM, 0),
SEC_PD(ACODEC, 0),
SEC_PD(AUDIO, 0),
SEC_PD(OTP, 0),
SEC_PD(DMA, 0),
SEC_PD(SD_EMMC, 0),
SEC_PD(RAMA, 0),
/* SRAMB is used as ATF runtime memory, and should be always on */
SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
SEC_PD(IR, 0),
SEC_PD(SPICC, 0),
SEC_PD(SPIFC, 0),
SEC_PD(USB, 0),
/* NIC is for the Arm NIC-400 interconnect, and should be always on */
SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
SEC_PD(PDMIN, 0),
SEC_PD(RSA, 0),
};
static int meson_secure_pwrc_probe(struct platform_device *pdev)
{
int i;
struct device_node *sm_np;
struct meson_secure_pwrc *pwrc;
const struct meson_secure_pwrc_domain_data *match;
match = of_device_get_match_data(&pdev->dev);
if (!match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
if (!sm_np) {
dev_err(&pdev->dev, "no secure-monitor node\n");
return -ENODEV;
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
if (!pwrc)
return -ENOMEM;
pwrc->fw = meson_sm_get(sm_np);
of_node_put(sm_np);
if (!pwrc->fw)
return -EPROBE_DEFER;
pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->xlate.domains),
GFP_KERNEL);
if (!pwrc->xlate.domains)
return -ENOMEM;
pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->domains), GFP_KERNEL);
if (!pwrc->domains)
return -ENOMEM;
pwrc->xlate.num_domains = match->count;
platform_set_drvdata(pdev, pwrc);
for (i = 0 ; i < match->count ; ++i) {
struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];
if (!match->domains[i].index)
continue;
dom->pwrc = pwrc;
dom->index = match->domains[i].index;
dom->base.name = match->domains[i].name;
dom->base.flags = match->domains[i].flags;
dom->base.power_on = meson_secure_pwrc_on;
dom->base.power_off = meson_secure_pwrc_off;
pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));
pwrc->xlate.domains[i] = &dom->base;
}
return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
.domains = a1_pwrc_domains,
.count = ARRAY_SIZE(a1_pwrc_domains),
};
static const struct of_device_id meson_secure_pwrc_match_table[] = {
{
.compatible = "amlogic,meson-a1-pwrc",
.data = &meson_secure_a1_pwrc_data,
},
{ /* sentinel */ }
};
static struct platform_driver meson_secure_pwrc_driver = {
.probe = meson_secure_pwrc_probe,
.driver = {
.name = "meson_secure_pwrc",
.of_match_table = meson_secure_pwrc_match_table,
},
};
builtin_platform_driver(meson_secure_pwrc_driver);
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
* Copyright 2016-2019 NXP
*
*/
#include <linux/types.h>
......@@ -432,6 +432,69 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
/**
* dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
* to a frame queue using one fqid.
* @d: the given DPIO service.
* @fqid: the given frame queue id.
* @fd: the frame descriptor which is enqueued.
* @nb: number of frames to be enqueued
*
* Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
* or -ENODEV if there is no dpio service.
*/
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
u32 fqid,
const struct dpaa2_fd *fd,
int nb)
{
struct qbman_eq_desc ed;
d = service_select(d);
if (!d)
return -ENODEV;
qbman_eq_desc_clear(&ed);
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
/**
* dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
* to different frame queue using a list of fqids.
* @d: the given DPIO service.
* @fqid: the given list of frame queue ids.
* @fd: the frame descriptor which is enqueued.
* @nb: number of frames to be enqueued
*
* Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
* or -ENODEV if there is no dpio service.
*/
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
u32 *fqid,
const struct dpaa2_fd *fd,
int nb)
{
int i;
struct qbman_eq_desc ed[32];
d = service_select(d);
if (!d)
return -ENODEV;
for (i = 0; i < nb; i++) {
qbman_eq_desc_clear(&ed[i]);
qbman_eq_desc_set_no_orp(&ed[i], 0);
qbman_eq_desc_set_fq(&ed[i], fqid[i]);
}
return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
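A hedged usage sketch for the new multi-enqueue API (demo_enqueue_burst and its parameters are hypothetical). Note the kernel-doc of the underlying qbman_swp_enqueue_multiple(), further below, documents a non-negative return as the number of frames enqueued, so a caller should handle partial enqueues:
static int demo_enqueue_burst(struct dpaa2_io *d, u32 fqid,
                              const struct dpaa2_fd *fds, int nb)
{
        int done = 0;

        while (done < nb) {
                int ret = dpaa2_io_service_enqueue_multiple_fq(d, fqid,
                                                               fds + done,
                                                               nb - done);
                if (ret == -EBUSY)
                        continue;       /* ring busy, retry the remainder */
                if (ret < 0)
                        return ret;     /* e.g. -ENODEV */
                done += ret;
        }

        return done;
}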
/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
......@@ -526,7 +589,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
* @max_frames: the maximum number of dequeued result for frames, must be <= 16.
* @max_frames: the maximum number of dequeued result for frames, must be <= 32.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
......@@ -541,7 +604,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
struct dpaa2_io_store *ret;
size_t size;
if (!max_frames || (max_frames > 16))
if (!max_frames || (max_frames > 32))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
......
......@@ -9,6 +9,13 @@
#include <soc/fsl/dpaa2-fd.h>
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000
struct dpaa2_dq;
struct qbman_swp;
......@@ -81,6 +88,10 @@ struct qbman_eq_desc {
u8 wae;
u8 rspid;
__le64 rsp_addr;
};
struct qbman_eq_desc_with_fd {
struct qbman_eq_desc desc;
u8 fd[32];
};
......@@ -132,8 +143,48 @@ struct qbman_swp {
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
struct {
u32 pi;
u32 pi_vb;
u32 pi_ring_size;
u32 pi_ci_mask;
u32 ci;
int available;
u32 pend;
u32 no_pfdr;
} eqcr;
spinlock_t access_spinlock;
};
/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers);
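These pointers let qbman_swp_init() pick a revision-specific implementation at portal init time, keyed off the QMAN_REV_* values added at the top of this header. A sketch of the selection pattern, with demo_* prototypes standing in for the real direct and memory-backed variants in the elided qbman-portal.c:
static int demo_enqueue_direct(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct dpaa2_fd *fd);
static int demo_enqueue_mem_back(struct qbman_swp *s,
                                 const struct qbman_eq_desc *d,
                                 const struct dpaa2_fd *fd);

static void demo_select_enqueue(u32 qman_version)
{
        if ((qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                qbman_swp_enqueue_ptr = demo_enqueue_direct;
        else
                qbman_swp_enqueue_ptr = demo_enqueue_mem_back;
}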
/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
......@@ -158,9 +209,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
......@@ -172,15 +220,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
......@@ -193,6 +237,61 @@ void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
* qbman_swp_enqueue() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd)
{
return qbman_swp_enqueue_ptr(s, d, fd);
}
/**
* qbman_swp_enqueue_multiple() - Issue a multi enqueue command
* using one enqueue descriptor
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: table pointer of frame descriptor table to be enqueued
* @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
* @num_frames: number of fd to be enqueued
*
* Return the number of fd enqueued, or a negative error number.
*/
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames)
{
return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
/**
* qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
* using multiple enqueue descriptor
* @s: the software portal used for enqueue
* @d: table of minimal enqueue descriptor
* @fd: table pointer of frame descriptor table to be enqueued
* @num_frames: number of fd to be enqueued
*
* Return the number of fd enqueued, or a negative error number.
*/
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames)
{
return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
......@@ -504,4 +603,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid,
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
/**
* qbman_swp_release() - Issue a buffer release command
* @s: the software portal object
* @d: the release descriptor
* @buffers: a pointer pointing to the buffer address to be released
* @num_buffers: number of buffers to be released, must be less than 8
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
static inline int qbman_swp_release(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers)
{
return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
/**
* qbman_swp_pull() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
*
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
static inline int qbman_swp_pull(struct qbman_swp *s,
struct qbman_pull_desc *d)
{
return qbman_swp_pull_ptr(s, d);
}
/**
* qbman_swp_dqrr_next() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
return qbman_swp_dqrr_next_ptr(s);
}
#endif /* __FSL_QBMAN_PORTAL_H */
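Taken together, qbman_swp_pull(), qbman_swp_dqrr_next() and qbman_swp_dqrr_consume() support the usual pull-dequeue loop. An illustrative sketch (demo_drain is hypothetical, and assumes the pull descriptor was already configured with the qbman_pull_desc_set_*() calls):
static void demo_drain(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        const struct dpaa2_dq *dq;

        while (qbman_swp_pull(s, d) == -EBUSY)
                ;       /* portal not ready for another pull, retry */

        /*
         * A real consumer polls until the pull completes;
         * qbman_swp_dqrr_next() returns NULL whenever no unconsumed
         * entry is available yet.
         */
        while ((dq = qbman_swp_dqrr_next(s))) {
                if (qbman_result_is_DQ(dq)) {
                        /* hand the frame descriptor in dq to the driver */
                }
                qbman_swp_dqrr_consume(s, dq);
        }
}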
......@@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base,
qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
/*
......@@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
qe_firmware_info.extended_modes = firmware->extended_modes;
qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
......
......@@ -46,7 +46,7 @@ int cpm_muram_init(void)
{
struct device_node *np;
struct resource r;
u32 zero[OF_MAX_ADDR_CELLS] = {};
__be32 zero[OF_MAX_ADDR_CELLS] = {};
resource_size_t max = 0;
int i = 0;
int ret = 0;
......
......@@ -44,7 +44,7 @@
struct qe_ic {
/* Control registers offset */
u32 __iomem *regs;
__be32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;
......
......@@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
{
int source;
u32 shift;
struct qe_mux *qe_mux_reg;
struct qe_mux __iomem *qe_mux_reg;
qe_mux_reg = &qe_immr->qmx;
......
......@@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx);
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
struct ucc_slow *us_regs;
struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
......@@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
struct ucc_slow *us_regs;
struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
......@@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
u32 i;
struct ucc_slow __iomem *us_regs;
u32 gumr;
struct qe_bd *bd;
struct qe_bd __iomem *bd;
u32 id;
u32 command;
int ret = 0;
......@@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return -ENOMEM;
}
uccs->saved_uccm = 0;
uccs->p_rx_frame = 0;
us_regs = uccs->us_regs;
uccs->p_ucce = (u16 *) & (us_regs->ucce);
uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
uccs->rx_frames = 0;
uccs->tx_frames = 0;
uccs->rx_discarded = 0;
#endif /* STATISTICS */
uccs->p_ucce = &us_regs->ucce;
uccs->p_uccm = &us_regs->uccm;
/* Get PRAM base */
uccs->us_pram_offset =
......@@ -231,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
qe_iowrite32be(0, (u32 *)bd);
qe_iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(0, &bd->buf);
qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
qe_iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
qe_iowrite32be(0, (u32 *)bd);
qe_iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
qe_iowrite32be(R_W, (u32 __iomem *)bd);
qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
......@@ -273,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
us_info->diag | us_info->mode;
gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
(u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
......@@ -289,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
uccs->us_pram->tbmr = UCC_BMR_BO_BE;
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
......
......@@ -10,11 +10,20 @@ config IMX_GPCV2_PM_DOMAINS
config IMX_SCU_SOC
bool "i.MX System Controller Unit SoC info support"
depends on IMX_SCU || COMPILE_TEST
depends on IMX_SCU
select SOC_BUS
help
If you say yes here you get support for the NXP i.MX System
Controller Unit SoC info module, it will provide the SoC info
like SoC family, ID and revision etc.
config SOC_IMX8M
bool "i.MX8M SoC family support"
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
help
If you say yes here you get support for the NXP i.MX8M family
of SoCs. It will provide SoC info such as the SoC family,
ID and revision.
endmenu
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
......@@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
int i, ret, sw, sw2iso;
u32 val;
int i, ret;
u32 val, req;
if (pd->supply) {
ret = regulator_enable(pd->supply);
......@@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
/* Read ISO and ISO2SW power up delays */
regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
sw = val & 0x3f;
sw2iso = (val >> 8) & 0x3f;
/* Request GPC to power up domain */
val = BIT(pd->cntr_pdn_bit + 1);
regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
req = BIT(pd->cntr_pdn_bit + 1);
regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
/* Wait ISO + ISO2SW IPG clock cycles */
udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
/* Wait for the PGC to handle the request */
ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
1, 50);
if (ret)
pr_err("powerup request on domain %s timed out\n", genpd->name);
/* Wait for reset to propagate through peripherals */
usleep_range(5, 10);
/* Disable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
......@@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
.fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
......
......@@ -14,6 +14,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
......
......@@ -76,6 +76,10 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
......@@ -88,7 +92,6 @@ config QCOM_PM
config QCOM_QMI_HELPERS
tristate
depends on ARCH_QCOM || COMPILE_TEST
depends on NET
config QCOM_RMTFS_MEM
......@@ -197,6 +200,8 @@ config QCOM_APR
tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
depends on NET
select QCOM_PDR_HELPERS
help
Enable APR IPC protocol support between
application processor and QDSP6. APR is
......
......@@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
......
......@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>
......@@ -21,6 +22,7 @@ struct apr {
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
struct list_head rx_list;
......@@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
id->svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);
of_property_read_string_index(np, "qcom,protection-domain",
1, &adev->service_path);
dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
ret = device_register(&adev->dev);
......@@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np,
return ret;
}
static void of_register_apr_devices(struct device *dev)
static int of_apr_add_pd_lookups(struct device *dev)
{
const char *service_name, *service_path;
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
struct pdr_service *pds;
int ret;
for_each_child_of_node(dev->of_node, node) {
ret = of_property_read_string_index(node, "qcom,protection-domain",
0, &service_name);
if (ret < 0)
continue;
ret = of_property_read_string_index(node, "qcom,protection-domain",
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
return ret;
}
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %d\n", ret);
return PTR_ERR(pds);
}
}
return 0;
}
static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
const char *service_path;
int ret;
for_each_child_of_node(dev->of_node, node) {
struct apr_device_id id = { {0} };
/*
* This function is called with svc_path NULL during
* apr_probe(), in which case we register any apr devices
* without a qcom,protection-domain specified.
*
* Then, as the protection domains become available
* (if applicable) this function is again called, but with
* svc_path representing the service becoming available. In
* this case we register any apr devices with a matching
* qcom,protection-domain.
*/
ret = of_property_read_string_index(node, "qcom,protection-domain",
1, &service_path);
if (svc_path) {
/* skip APR services that are PD independent */
if (ret)
continue;
/* skip APR services whose PD paths don't match */
if (strcmp(service_path, svc_path))
continue;
} else {
/* skip APR services whose PD lookups are registered */
if (ret == 0)
continue;
}
if (of_property_read_u32(node, "reg", &id.svc_id))
continue;
......@@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev)
}
}
static int apr_remove_device(struct device *dev, void *svc_path)
{
struct apr_device *adev = to_apr_device(dev);
if (svc_path && adev->service_path) {
if (!strcmp(adev->service_path, (char *)svc_path))
device_unregister(&adev->dev);
} else {
device_unregister(&adev->dev);
}
return 0;
}
static void apr_pd_status(int state, char *svc_path, void *priv)
{
struct apr *apr = (struct apr *)priv;
switch (state) {
case SERVREG_SERVICE_STATE_UP:
of_register_apr_devices(apr->dev, svc_path);
break;
case SERVREG_SERVICE_STATE_DOWN:
device_for_each_child(apr->dev, svc_path, apr_remove_device);
break;
}
}
static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
......@@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
}
INIT_WORK(&apr->rx_work, apr_rxwq);
apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
if (IS_ERR(apr->pdr)) {
dev_err(dev, "Failed to init PDR handle\n");
ret = PTR_ERR(apr->pdr);
goto destroy_wq;
}
INIT_LIST_HEAD(&apr->rx_list);
spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
of_register_apr_devices(dev);
return 0;
}
static int apr_remove_device(struct device *dev, void *null)
{
struct apr_device *adev = to_apr_device(dev);
ret = of_apr_add_pd_lookups(dev);
if (ret)
goto handle_release;
device_unregister(&adev->dev);
of_register_apr_devices(dev, NULL);
return 0;
handle_release:
pdr_handle_release(apr->pdr);
destroy_wq:
destroy_workqueue(apr->rxwq);
return ret;
}
static void apr_remove(struct rpmsg_device *rpdev)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);
......
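The pdr_interface helpers used by apr here are generic. A minimal sketch of another kernel client (the demo_* names are hypothetical), using only the calls visible in this diff and a service name/path pair taken from the binding above:
static void demo_pd_status(int state, char *svc_path, void *priv)
{
        pr_info("pd %s went %s\n", svc_path,
                state == SERVREG_SERVICE_STATE_UP ? "up" : "down");
}

static struct pdr_handle *demo_pdr_init(void)
{
        struct pdr_handle *pdr;
        struct pdr_service *pds;

        pdr = pdr_handle_alloc(demo_pd_status, NULL);
        if (IS_ERR(pdr))
                return pdr;

        pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
        if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
                pdr_handle_release(pdr);
                return ERR_CAST(pds);
        }

        return pdr;
}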
......@@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data)
{
struct qmp *qmp = data;
wake_up_interruptible_all(&qmp->event);
wake_up_all(&qmp->event);
return IRQ_HANDLED;
}
......@@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
......@@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
data, len / sizeof(u32));
writel(len, qmp->msgram + qmp->offset);
/* Read back len to confirm data written in message RAM */
tlen = readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
......
......@@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
int rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);
#endif /* __RPM_INTERNAL_H__ */
......@@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
......
......@@ -23,7 +23,7 @@
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
......@@ -33,7 +33,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
.dev = dev, \
.dev = device, \
.needs_free = false, \
}
......@@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req)
req->sleep_val != req->wake_val);
}
static int send_single(const struct device *dev, enum rpmh_state state,
static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
......@@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state,
/**
* rpmh_flush: Flushes the buffered active and sleep sets to TCS
*
* @dev: The device making the request
* @ctrlr: controller making request to flush cached data
*
* Return: -EBUSY if the controller is busy, probably waiting on a response
* to a RPMH request sent earlier.
......@@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state,
* that is powering down the entire system. Since no other RPMH API would be
* executing at this time, it is safe to run lockless.
*/
int rpmh_flush(const struct device *dev)
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret;
if (!ctrlr->dirty) {
......@@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev)
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
return ret;
ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
p->addr, p->wake_val);
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
return ret;
}
......@@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev)
return 0;
}
EXPORT_SYMBOL(rpmh_flush);
/**
* rpmh_invalidate: Invalidate all sleep and active sets
......
......@@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
seq_puts(seq, image_version->type); \
seq_puts(seq, "\n"); \
seq_putc(seq, '\n'); \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \
......
......@@ -195,19 +195,19 @@ config ARCH_R8A774C0
This enables support for the Renesas RZ/G2E SoC.
config ARCH_R8A77950
bool
bool "Renesas R-Car H3 ES1.x SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC (revision 1.x).
config ARCH_R8A77951
bool
config ARCH_R8A7795
bool "Renesas R-Car H3 SoC Platform"
select ARCH_R8A77950
select ARCH_R8A77951
bool "Renesas R-Car H3 ES2.0+ SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC.
This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
later).
config ARCH_R8A77960
bool "Renesas R-Car M3-W SoC Platform"
......
/* SPDX-License-Identifier: GPL-2.0
*
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas R-Car System Controller
*
* Copyright (C) 2016 Glider bvba
......
......@@ -259,7 +259,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7794
{ .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
#endif
#ifdef CONFIG_ARCH_R8A7795
#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
......
......@@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)
kref_init(&ctx->refcount);
ctx->teedev = teedev;
INIT_LIST_HEAD(&ctx->list_shm);
rc = teedev->desc->ops->open(ctx);
if (rc)
goto err;
......
......@@ -37,7 +37,8 @@ struct tee_shm_pool {
* @num_users: number of active users of this device
* @c_no_user: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr
* @idr: register of shared memory object allocated on this device
* @idr: register of user space shared memory objects allocated or
* registered on this device
* @pool: shared memory pool
*/
struct tee_device {
......
......@@ -18,6 +18,10 @@
#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4)
/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */
#define SYSC_PRUSS_SUB_MWAIT (1 << 5)
#define SYSC_PRUSS_STANDBY_INIT (1 << 4)
/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE 0
#define SYSC_IDLE_NO 1
......
/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
*/
#ifndef _DT_BINDINGS_MESON_A1_POWER_H
#define _DT_BINDINGS_MESON_A1_POWER_H
#define PWRC_DSPA_ID 8
#define PWRC_DSPB_ID 9
#define PWRC_UART_ID 10
#define PWRC_DMC_ID 11
#define PWRC_I2C_ID 12
#define PWRC_PSRAM_ID 13
#define PWRC_ACODEC_ID 14
#define PWRC_AUDIO_ID 15
#define PWRC_OTP_ID 16
#define PWRC_DMA_ID 17
#define PWRC_SD_EMMC_ID 18
#define PWRC_RAMA_ID 19
#define PWRC_RAMB_ID 20
#define PWRC_IR_ID 21
#define PWRC_SPICC_ID 22
#define PWRC_SPIFC_ID 23
#define PWRC_USB_ID 24
#define PWRC_NIC_ID 25
#define PWRC_PDMIN_ID 26
#define PWRC_RSA_ID 27
#define PWRC_MAX_ID 28
#endif
......@@ -25,7 +25,6 @@ enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
IMX_SC_RPC_SVC_ABORT = 9
};
struct imx_sc_rpc_msg {
......
......@@ -12,6 +12,8 @@ enum {
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
SM_GET_CHIP_ID,
SM_A1_PWRC_SET,
SM_A1_PWRC_GET,
};
struct meson_sm_firmware;
......