Commit 333505a4 authored by Olof Johansson


Merge tag 'qcom-drivers-for-5.6' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into arm/drivers

Qualcomm driver updates for v5.6

* SCM major refactoring and cleanup
* Properly flag active only power domains as active only
* Add SC7180 and SM8150 RPMH power domains
* Return EPROBE_DEFER from QMI if the packet family is not yet available

* tag 'qcom-drivers-for-5.6' of https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux: (27 commits)
  firmware: qcom_scm: Dynamically support SMCCC and legacy conventions
  firmware: qcom_scm: Remove thin wrappers
  firmware: qcom_scm: Order functions, definitions by service/command
  firmware: qcom_scm-32: Add device argument to atomic calls
  firmware: qcom_scm-32: Create common legacy atomic call
  firmware: qcom_scm-32: Move SMCCC register filling to qcom_scm_call
  firmware: qcom_scm-32: Use qcom_scm_desc in non-atomic calls
  firmware: qcom_scm-32: Add funcnum IDs
  firmware: qcom_scm-32: Use SMC arch wrappers
  firmware: qcom_scm-64: Improve SMC convention detection
  firmware: qcom_scm-64: Move SMC register filling to qcom_scm_call_smccc
  firmware: qcom_scm-64: Add SCM results struct
  firmware: qcom_scm-64: Move svc/cmd/owner into qcom_scm_desc
  firmware: qcom_scm-64: Make SMC macros less magical
  firmware: qcom_scm: Remove unused qcom_scm_get_version
  firmware: qcom_scm: Apply consistent naming scheme to command IDs
  firmware: qcom_scm: Rename macros and structures
  soc: qcom: rpmhpd: Set 'active_only' for active only power domains
  firmware: scm: Add stubs for OCMEM and restore_sec_cfg_available
  dt-bindings: power: rpmpd: Convert rpmpd bindings to yaml
  ...

Link: https://lore.kernel.org/r/20200113204405.GD3325@yoga
Signed-off-by: Olof Johansson <olof@lixom.net>
parents a9e3e12f 9a434cee
@@ -47,7 +47,7 @@ examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
-    cache-controller@1100000 {
+    system-cache-controller@1100000 {
       compatible = "qcom,sdm845-llcc";
       reg = <0x1100000 0x200000>, <0x1300000 0x50000>;
       reg-names = "llcc_base", "llcc_broadcast_base";
Qualcomm RPM/RPMh Power domains
For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
which then translates it into a corresponding voltage on a rail
Required Properties:
- compatible: Should be one of the following
* qcom,msm8976-rpmpd: RPM Power domain for the msm8976 family of SoC
* qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoC
* qcom,msm8998-rpmpd: RPM Power domain for the msm8998 family of SoC
* qcom,qcs404-rpmpd: RPM Power domain for the qcs404 family of SoC
* qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoC
- #power-domain-cells: number of cells in Power domain specifier
must be 1.
- operating-points-v2: Phandle to the OPP table for the Power domain.
Refer to Documentation/devicetree/bindings/power/power_domain.txt
and Documentation/devicetree/bindings/opp/opp.txt for more details
Refer to <dt-bindings/power/qcom-rpmpd.h> for the level values for
various OPPs for different platforms as well as Power domain indexes
Example: rpmh power domain controller and OPP table
#include <dt-bindings/power/qcom-rpmhpd.h>
opp-level values specified in the OPP tables for RPMh power domains
should use the RPMH_REGULATOR_LEVEL_* constants from
<dt-bindings/power/qcom-rpmhpd.h>
rpmhpd: power-controller {
compatible = "qcom,sdm845-rpmhpd";
#power-domain-cells = <1>;
operating-points-v2 = <&rpmhpd_opp_table>;
rpmhpd_opp_table: opp-table {
compatible = "operating-points-v2";
rpmhpd_opp_ret: opp1 {
opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
};
rpmhpd_opp_min_svs: opp2 {
opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
};
rpmhpd_opp_low_svs: opp3 {
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
};
rpmhpd_opp_svs: opp4 {
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
};
rpmhpd_opp_svs_l1: opp5 {
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
};
rpmhpd_opp_nom: opp6 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
};
rpmhpd_opp_nom_l1: opp7 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
};
rpmhpd_opp_nom_l2: opp8 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
};
rpmhpd_opp_turbo: opp9 {
opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
};
rpmhpd_opp_turbo_l1: opp10 {
opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
};
};
};
Example: rpm power domain controller and OPP table
rpmpd: power-controller {
compatible = "qcom,msm8996-rpmpd";
#power-domain-cells = <1>;
operating-points-v2 = <&rpmpd_opp_table>;
rpmpd_opp_table: opp-table {
compatible = "operating-points-v2";
rpmpd_opp_low: opp1 {
opp-level = <1>;
};
rpmpd_opp_ret: opp2 {
opp-level = <2>;
};
rpmpd_opp_svs: opp3 {
opp-level = <3>;
};
rpmpd_opp_normal: opp4 {
opp-level = <4>;
};
rpmpd_opp_high: opp5 {
opp-level = <5>;
};
rpmpd_opp_turbo: opp6 {
opp-level = <6>;
};
};
};
Example: Client/Consumer device using OPP table
leaky-device0@12350000 {
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&rpmhpd SDM845_MX>;
operating-points-v2 = <&leaky_opp_table>;
};
leaky_opp_table: opp-table {
compatible = "operating-points-v2";
opp1 {
opp-hz = /bits/ 64 <144000>;
required-opps = <&rpmhpd_opp_low>;
};
opp2 {
opp-hz = /bits/ 64 <400000>;
required-opps = <&rpmhpd_opp_ret>;
};
opp3 {
opp-hz = /bits/ 64 <20000000>;
required-opps = <&rpmpd_opp_svs>;
};
opp4 {
opp-hz = /bits/ 64 <25000000>;
required-opps = <&rpmpd_opp_normal>;
};
};
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/power/qcom,rpmpd.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm RPM/RPMh Power domains
maintainers:
- Rajendra Nayak <rnayak@codeaurora.org>
description:
For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
which then translates it into a corresponding voltage on a rail.
properties:
compatible:
enum:
- qcom,msm8976-rpmpd
- qcom,msm8996-rpmpd
- qcom,msm8998-rpmpd
- qcom,qcs404-rpmpd
- qcom,sc7180-rpmhpd
- qcom,sdm845-rpmhpd
- qcom,sm8150-rpmhpd
'#power-domain-cells':
const: 1
operating-points-v2: true
opp-table:
type: object
required:
- compatible
- '#power-domain-cells'
- operating-points-v2
additionalProperties: false
examples:
- |
// Example 1 (rpmh power domain controller and OPP table):
#include <dt-bindings/power/qcom-rpmpd.h>
rpmhpd: power-controller {
compatible = "qcom,sdm845-rpmhpd";
#power-domain-cells = <1>;
operating-points-v2 = <&rpmhpd_opp_table>;
rpmhpd_opp_table: opp-table {
compatible = "operating-points-v2";
rpmhpd_opp_ret: opp1 {
opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
};
rpmhpd_opp_min_svs: opp2 {
opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
};
rpmhpd_opp_low_svs: opp3 {
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
};
rpmhpd_opp_svs: opp4 {
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
};
rpmhpd_opp_svs_l1: opp5 {
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
};
rpmhpd_opp_nom: opp6 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
};
rpmhpd_opp_nom_l1: opp7 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
};
rpmhpd_opp_nom_l2: opp8 {
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
};
rpmhpd_opp_turbo: opp9 {
opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
};
rpmhpd_opp_turbo_l1: opp10 {
opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
};
};
};
- |
// Example 2 (rpm power domain controller and OPP table):
rpmpd: power-controller {
compatible = "qcom,msm8996-rpmpd";
#power-domain-cells = <1>;
operating-points-v2 = <&rpmpd_opp_table>;
rpmpd_opp_table: opp-table {
compatible = "operating-points-v2";
rpmpd_opp_low: opp1 {
opp-level = <1>;
};
rpmpd_opp_ret: opp2 {
opp-level = <2>;
};
rpmpd_opp_svs: opp3 {
opp-level = <3>;
};
rpmpd_opp_normal: opp4 {
opp-level = <4>;
};
rpmpd_opp_high: opp5 {
opp-level = <5>;
};
rpmpd_opp_turbo: opp6 {
opp-level = <6>;
};
};
};
- |
// Example 3 (Client/Consumer device using OPP table):
leaky-device0@12350000 {
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&rpmhpd 0>;
operating-points-v2 = <&leaky_opp_table>;
};
leaky_opp_table: opp-table {
compatible = "operating-points-v2";
opp1 {
opp-hz = /bits/ 64 <144000>;
required-opps = <&rpmhpd_opp_low>;
};
opp2 {
opp-hz = /bits/ 64 <400000>;
required-opps = <&rpmhpd_opp_ret>;
};
opp3 {
opp-hz = /bits/ 64 <20000000>;
required-opps = <&rpmpd_opp_svs>;
};
opp4 {
opp-hz = /bits/ 64 <25000000>;
required-opps = <&rpmpd_opp_normal>;
};
};
...
@@ -239,14 +239,6 @@ config QCOM_SCM
 	depends on ARM || ARM64
 	select RESET_CONTROLLER
 
-config QCOM_SCM_32
-	def_bool y
-	depends on QCOM_SCM && ARM
-
-config QCOM_SCM_64
-	def_bool y
-	depends on QCOM_SCM && ARM64
-
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
 	bool "Qualcomm download mode enabled by default"
 	depends on QCOM_SCM
@@ -17,10 +17,7 @@ obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
 obj-$(CONFIG_FW_CFG_SYSFS)	+= qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o
-obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
-obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
-CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
 obj-$(CONFIG_TURRIS_MOX_RWTM)	+= turris-mox-rwtm.o
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
struct qcom_scm_entry {
int flag;
void *entry;
};
static struct qcom_scm_entry qcom_scm_wb[] = {
{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};
static DEFINE_MUTEX(qcom_scm_lock);
/**
* struct qcom_scm_command - one SCM command buffer
* @len: total available memory for command and response
* @buf_offset: start of command buffer
* @resp_hdr_offset: start of response buffer
* @id: command to be executed
* @buf: buffer returned from qcom_scm_get_command_buffer()
*
* An SCM command is laid out in memory as follows:
*
* ------------------- <--- struct qcom_scm_command
* | command header |
* ------------------- <--- qcom_scm_get_command_buffer()
* | command buffer |
* ------------------- <--- struct qcom_scm_response and
* | response header | qcom_scm_command_to_response()
* ------------------- <--- qcom_scm_get_response_buffer()
* | response buffer |
* -------------------
*
* There can be arbitrary padding between the headers and buffers so
* you should always use the appropriate qcom_scm_get_*_buffer() routines
* to access the buffers in a safe manner.
*/
struct qcom_scm_command {
__le32 len;
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
__le32 buf[0];
};
/**
* struct qcom_scm_response - one SCM response buffer
* @len: total available memory for response
* @buf_offset: start of response data relative to start of qcom_scm_response
* @is_complete: indicates if the command has finished processing
*/
struct qcom_scm_response {
__le32 len;
__le32 buf_offset;
__le32 is_complete;
};
/**
* qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
* @cmd: command
*
* Returns a pointer to a response for a command.
*/
static inline struct qcom_scm_response *qcom_scm_command_to_response(
const struct qcom_scm_command *cmd)
{
return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}
/**
* qcom_scm_get_command_buffer() - Get a pointer to a command buffer
* @cmd: command
*
* Returns a pointer to the command buffer of a command.
*/
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
return (void *)cmd->buf;
}
/**
* qcom_scm_get_response_buffer() - Get a pointer to a response buffer
* @rsp: response
*
* Returns a pointer to a response buffer of a response.
*/
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
static u32 smc(u32 cmd_addr)
{
int context_id;
register u32 r0 asm("r0") = 1;
register u32 r1 asm("r1") = (u32)&context_id;
register u32 r2 asm("r2") = cmd_addr;
do {
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
: "r3", "r12");
} while (r0 == QCOM_SCM_INTERRUPTED);
return r0;
}
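/*
 * Editorial note on the calling convention above (not in the original
 * source): r0 = 1 selects the buffered "command" call, r1 points at a
 * context word the secure world may update, and r2 carries the physical
 * address of the marshalled command. A QCOM_SCM_INTERRUPTED result simply
 * re-issues the smc until the call completes.
 */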
/**
* qcom_scm_call() - Send an SCM command
* @dev: struct device
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
* @cmd_len: length of the command buffer
* @resp_buf: response buffer
* @resp_len: length of the response buffer
*
* Sends a command to the SCM and waits for the command to finish processing.
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
* must be flushed before invoking qcom_scm_call and invalidated in the cache
* immediately after qcom_scm_call returns. Cache maintenance on the command
* and response buffers is taken care of by qcom_scm_call; however, callers are
* responsible for any other cached buffers passed over to the secure world.
*/
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const void *cmd_buf, size_t cmd_len, void *resp_buf,
size_t resp_len)
{
int ret;
struct qcom_scm_command *cmd;
struct qcom_scm_response *rsp;
size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
dma_addr_t cmd_phys;
cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->len = cpu_to_le32(alloc_len);
cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
if (cmd_buf)
memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
rsp = qcom_scm_command_to_response(cmd);
cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, cmd_phys)) {
kfree(cmd);
return -ENOMEM;
}
mutex_lock(&qcom_scm_lock);
ret = smc(cmd_phys);
if (ret < 0)
ret = qcom_scm_remap_error(ret);
mutex_unlock(&qcom_scm_lock);
if (ret)
goto out;
do {
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
sizeof(*rsp), DMA_FROM_DEVICE);
} while (!rsp->is_complete);
if (resp_buf) {
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
le32_to_cpu(rsp->buf_offset),
resp_len, DMA_FROM_DEVICE);
memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
resp_len);
}
out:
dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
kfree(cmd);
return ret;
}
#define SCM_CLASS_REGISTER (0x2 << 8)
#define SCM_MASK_IRQS BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
SCM_CLASS_REGISTER | \
SCM_MASK_IRQS | \
(n & 0xf))
/**
* qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
* @svc_id: service identifier
* @cmd_id: command identifier
* @arg1: first argument
*
* This shall only be used with commands that are guaranteed to be
* uninterruptable, atomic and SMP safe.
*/
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
int context_id;
register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
register u32 r1 asm("r1") = (u32)&context_id;
register u32 r2 asm("r2") = arg1;
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
: "r3", "r12");
return r0;
}
/**
* qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
* @svc_id: service identifier
* @cmd_id: command identifier
* @arg1: first argument
* @arg2: second argument
*
* This shall only be used with commands that are guaranteed to be
* uninterruptable, atomic and SMP safe.
*/
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
int context_id;
register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
register u32 r1 asm("r1") = (u32)&context_id;
register u32 r2 asm("r2") = arg1;
register u32 r3 asm("r3") = arg2;
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
: "r12");
return r0;
}
u32 qcom_scm_get_version(void)
{
int context_id;
static u32 version = -1;
register u32 r0 asm("r0");
register u32 r1 asm("r1");
if (version != -1)
return version;
mutex_lock(&qcom_scm_lock);
r0 = 0x1 << 8;
r1 = (u32)&context_id;
do {
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r1")
__asmeq("%2", "r0")
__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0), "=r" (r1)
: "r" (r0), "r" (r1)
: "r2", "r3", "r12");
} while (r0 == QCOM_SCM_INTERRUPTED);
version = r1;
mutex_unlock(&qcom_scm_lock);
return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the cold boot address of the cpus. Any cpu outside the supported
* range would be removed from the cpu present mask.
*/
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
int flags = 0;
int cpu;
int scm_cb_flags[] = {
QCOM_SCM_FLAG_COLDBOOT_CPU0,
QCOM_SCM_FLAG_COLDBOOT_CPU1,
QCOM_SCM_FLAG_COLDBOOT_CPU2,
QCOM_SCM_FLAG_COLDBOOT_CPU3,
};
if (!cpus || (cpus && cpumask_empty(cpus)))
return -EINVAL;
for_each_cpu(cpu, cpus) {
if (cpu < ARRAY_SIZE(scm_cb_flags))
flags |= scm_cb_flags[cpu];
else
set_cpu_present(cpu, false);
}
return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
flags, virt_to_phys(entry));
}
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
const cpumask_t *cpus)
{
int ret;
int flags = 0;
int cpu;
struct {
__le32 flags;
__le32 addr;
} cmd;
/*
* Reassign only if we are switching from hotplug entry point
* to cpuidle entry point or vice versa.
*/
for_each_cpu(cpu, cpus) {
if (entry == qcom_scm_wb[cpu].entry)
continue;
flags |= qcom_scm_wb[cpu].flag;
}
/* No change in entry function */
if (!flags)
return 0;
cmd.addr = cpu_to_le32(virt_to_phys(entry));
cmd.flags = cpu_to_le32(flags);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
&cmd, sizeof(cmd), NULL, 0);
if (!ret) {
for_each_cpu(cpu, cpus)
qcom_scm_wb[cpu].entry = entry;
}
return ret;
}
/**
* qcom_scm_cpu_power_down() - Power down the cpu
* @flags - Flags to flush cache
*
* This is an end point to power down cpu. If there was a pending interrupt,
* the control would return from this function, otherwise, the cpu jumps to the
* warm boot entry point set for this cpu upon reset.
*/
void __qcom_scm_cpu_power_down(u32 flags)
{
qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
int ret;
__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
__le32 ret_val = 0;
ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
&svc_cmd, sizeof(svc_cmd), &ret_val,
sizeof(ret_val));
if (ret)
return ret;
return le32_to_cpu(ret_val);
}
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
u32 req_cnt, u32 *resp)
{
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
u32 mode)
{
struct ocmem_tz_lock {
__le32 id;
__le32 offset;
__le32 size;
__le32 mode;
} request;
request.id = cpu_to_le32(id);
request.offset = cpu_to_le32(offset);
request.size = cpu_to_le32(size);
request.mode = cpu_to_le32(mode);
return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
&request, sizeof(request), NULL, 0);
}
int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
{
struct ocmem_tz_unlock {
__le32 id;
__le32 offset;
__le32 size;
} request;
request.id = cpu_to_le32(id);
request.offset = cpu_to_le32(offset);
request.size = cpu_to_le32(size);
return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
&request, sizeof(request), NULL, 0);
}
void __qcom_scm_init(void)
{
}
bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
__le32 out;
__le32 in;
int ret;
in = cpu_to_le32(peripheral);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_IS_SUPPORTED_CMD,
&in, sizeof(in),
&out, sizeof(out));
return ret ? false : !!out;
}
int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
dma_addr_t metadata_phys)
{
__le32 scm_ret;
int ret;
struct {
__le32 proc;
__le32 image_addr;
} request;
request.proc = cpu_to_le32(peripheral);
request.image_addr = cpu_to_le32(metadata_phys);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_INIT_IMAGE_CMD,
&request, sizeof(request),
&scm_ret, sizeof(scm_ret));
return ret ? : le32_to_cpu(scm_ret);
}
int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
phys_addr_t addr, phys_addr_t size)
{
__le32 scm_ret;
int ret;
struct {
__le32 proc;
__le32 addr;
__le32 len;
} request;
request.proc = cpu_to_le32(peripheral);
request.addr = cpu_to_le32(addr);
request.len = cpu_to_le32(size);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_MEM_SETUP_CMD,
&request, sizeof(request),
&scm_ret, sizeof(scm_ret));
return ret ? : le32_to_cpu(scm_ret);
}
int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
__le32 out;
__le32 in;
int ret;
in = cpu_to_le32(peripheral);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
&in, sizeof(in),
&out, sizeof(out));
return ret ? : le32_to_cpu(out);
}
int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
__le32 out;
__le32 in;
int ret;
in = cpu_to_le32(peripheral);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_SHUTDOWN_CMD,
&in, sizeof(in),
&out, sizeof(out));
return ret ? : le32_to_cpu(out);
}
int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
__le32 out;
__le32 in = cpu_to_le32(reset);
int ret;
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
&in, sizeof(in),
&out, sizeof(out));
return ret ? : le32_to_cpu(out);
}
int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
}
int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
struct {
__le32 state;
__le32 id;
} req;
__le32 scm_ret = 0;
int ret;
req.state = cpu_to_le32(state);
req.id = cpu_to_le32(id);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
&req, sizeof(req), &scm_ret, sizeof(scm_ret));
return ret ? : le32_to_cpu(scm_ret);
}
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
size_t mem_sz, phys_addr_t src, size_t src_sz,
phys_addr_t dest, size_t dest_sz)
{
return -ENODEV;
}
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare)
{
struct msm_scm_sec_cfg {
__le32 id;
__le32 ctx_bank_num;
} cfg;
int ret, scm_ret = 0;
cfg.id = cpu_to_le32(device_id);
cfg.ctx_bank_num = cpu_to_le32(spare);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
&cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
if (ret || scm_ret)
return ret ? ret : -EINVAL;
return 0;
}
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size)
{
return -ENODEV;
}
int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
u32 spare)
{
return -ENODEV;
}
int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
unsigned int *val)
{
int ret;
ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
if (ret >= 0)
*val = ret;
return ret < 0 ? ret : 0;
}
int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
{
return -ENODEV;
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3
enum qcom_scm_arg_types {
QCOM_SCM_VAL,
QCOM_SCM_RO,
QCOM_SCM_RW,
QCOM_SCM_BUFVAL,
};
#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
(((a) & 0x3) << 4) | \
(((b) & 0x3) << 6) | \
(((c) & 0x3) << 8) | \
(((d) & 0x3) << 10) | \
(((e) & 0x3) << 12) | \
(((f) & 0x3) << 14) | \
(((g) & 0x3) << 16) | \
(((h) & 0x3) << 18) | \
(((i) & 0x3) << 20) | \
(((j) & 0x3) << 22) | \
((num) & 0xf))
#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
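/*
 * Worked example (editorial note, not in the original source):
 * QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW) describes two arguments, the
 * first passed by value and the second a read-write buffer:
 *
 *	(QCOM_SCM_VAL & 0x3) << 4	= 0x00
 *	(QCOM_SCM_RW  & 0x3) << 6	= 0x80
 *	num & 0xf			= 0x02
 *	arginfo				= 0x82
 *
 * The low nibble is what the call path later recovers as
 * "arglen = desc->arginfo & 0xf".
 */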
/**
* struct qcom_scm_desc
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
* @res: The values returned by the secure syscall
*/
struct qcom_scm_desc {
u32 arginfo;
u64 args[MAX_QCOM_SCM_ARGS];
};
static u64 qcom_smccc_convention = -1;
static DEFINE_MUTEX(qcom_scm_lock);
#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20
#define N_EXT_QCOM_SCM_ARGS 7
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, u32 fn_id,
u64 x5, u32 type)
{
u64 cmd;
struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
ARM_SMCCC_OWNER_SIP, fn_id);
quirk.state.a6 = 0;
do {
arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2], x5,
quirk.state.a6, 0, res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
cmd = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, u32 fn_id,
u64 x5, bool atomic)
{
int retry_count = 0;
if (atomic) {
__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
return;
}
do {
mutex_lock(&qcom_scm_lock);
__qcom_scm_call_do(desc, res, fn_id, x5,
ARM_SMCCC_STD_CALL);
mutex_unlock(&qcom_scm_lock);
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
__le32 *args = args_virt;
for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
args[i] = cpu_to_le32(desc->args[i +
FIRST_EXT_ARG_IDX]);
} else {
__le64 *args = args_virt;
for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
args[i] = cpu_to_le64(desc->args[i +
FIRST_EXT_ARG_IDX]);
}
args_phys = dma_map_single(dev, args_virt, alloc_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, args_phys)) {
kfree(args_virt);
return -ENOMEM;
}
x5 = args_phys;
}
qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
}
/**
* qcom_scm_call() - Invoke a syscall in the secure world
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
*/
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
might_sleep();
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
}
/**
* qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This can be called in atomic context.
*/
static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
}
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the cold boot address of the cpus. Any cpu outside the supported
* range would be removed from the cpu present mask.
*/
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
return -ENOTSUPP;
}
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
* @dev: Device pointer
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
const cpumask_t *cpus)
{
return -ENOTSUPP;
}
/**
* qcom_scm_cpu_power_down() - Power down the cpu
* @flags - Flags to flush cache
*
* This is an end point to power down cpu. If there was a pending interrupt,
* the control would return from this function, otherwise, the cpu jumps to the
* warm boot entry point set for this cpu upon reset.
*/
void __qcom_scm_cpu_power_down(u32 flags)
{
}
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.arginfo = QCOM_SCM_ARGS(1);
desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
u32 req_cnt, u32 *resp)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
desc.args[0] = req[0].addr;
desc.args[1] = req[0].val;
desc.args[2] = req[1].addr;
desc.args[3] = req[1].val;
desc.args[4] = req[2].addr;
desc.args[5] = req[2].val;
desc.args[6] = req[3].addr;
desc.args[7] = req[3].val;
desc.args[8] = req[4].addr;
desc.args[9] = req[4].val;
desc.arginfo = QCOM_SCM_ARGS(10);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
&res);
*resp = res.a1;
return ret;
}
int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
uint32_t size, uint32_t mode)
{
return -ENOTSUPP;
}
int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
uint32_t size)
{
return -ENOTSUPP;
}
void __qcom_scm_init(void)
{
u64 cmd;
struct arm_smccc_res res;
u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
/* First try a SMC64 call */
cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
ARM_SMCCC_OWNER_SIP, function);
arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
0, 0, 0, 0, 0, &res);
if (!res.a0 && res.a1)
qcom_smccc_convention = ARM_SMCCC_SMC_64;
else
qcom_smccc_convention = ARM_SMCCC_SMC_32;
}
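/*
 * Editorial note (not in the original source): the probe above issues the
 * SMC64 fast-call form of "is call available" and passes, as its single
 * argument, its own function id with the type bit cleared (the standard-call
 * form). A zero error code with a non-zero result means the SMC64 convention
 * is understood; otherwise the driver falls back to SMC32.
 */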
bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = peripheral;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_IS_SUPPORTED_CMD,
&desc, &res);
return ret ? false : !!res.a1;
}
int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
dma_addr_t metadata_phys)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = peripheral;
desc.args[1] = metadata_phys;
desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
phys_addr_t addr, phys_addr_t size)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = peripheral;
desc.args[1] = addr;
desc.args[2] = size;
desc.arginfo = QCOM_SCM_ARGS(3);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = peripheral;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = peripheral;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = reset;
desc.args[1] = 0;
desc.arginfo = QCOM_SCM_ARGS(2);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
&res);
return ret ? : res.a1;
}
int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = state;
desc.args[1] = id;
desc.arginfo = QCOM_SCM_ARGS(2);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
size_t mem_sz, phys_addr_t src, size_t src_sz,
phys_addr_t dest, size_t dest_sz)
{
int ret;
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = mem_region;
desc.args[1] = mem_sz;
desc.args[2] = src;
desc.args[3] = src_sz;
desc.args[4] = dest;
desc.args[5] = dest_sz;
desc.args[6] = 0;
desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
QCOM_SCM_VAL, QCOM_SCM_VAL);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
QCOM_MEM_PROT_ASSIGN_ID,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = device_id;
desc.args[1] = spare;
desc.arginfo = QCOM_SCM_ARGS(2);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
&desc, &res);
return ret ? : res.a1;
}
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = spare;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);
if (size)
*size = res.a1;
return ret ? : res.a2;
}
int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
u32 spare)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = addr;
desc.args[1] = size;
desc.args[2] = spare;
desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_VAL);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);
/* the pg table has been initialized already, ignore the error */
if (ret == -EPERM)
ret = 0;
return ret;
}
int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
&desc, &res);
}
int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
unsigned int *val)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = addr;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
&desc, &res);
if (ret >= 0)
*val = res.a1;
return ret < 0 ? ret : 0;
}
int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = addr;
desc.args[1] = val;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
desc.args[1] = en;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
static DEFINE_MUTEX(qcom_scm_lock);
/**
* struct arm_smccc_args
* @args: The array of values used in registers in smc instruction
*/
struct arm_smccc_args {
unsigned long args[8];
};
/**
* struct scm_legacy_command - one SCM command buffer
* @len: total available memory for command and response
* @buf_offset: start of command buffer
* @resp_hdr_offset: start of response buffer
* @id: command to be executed
* @buf: buffer returned from scm_legacy_get_command_buffer()
*
* An SCM command is laid out in memory as follows:
*
* ------------------- <--- struct scm_legacy_command
* | command header |
* ------------------- <--- scm_legacy_get_command_buffer()
* | command buffer |
* ------------------- <--- struct scm_legacy_response and
* | response header | scm_legacy_command_to_response()
* ------------------- <--- scm_legacy_get_response_buffer()
* | response buffer |
* -------------------
*
* There can be arbitrary padding between the headers and buffers so
* you should always use the appropriate scm_legacy_get_*_buffer() routines
* to access the buffers in a safe manner.
*/
struct scm_legacy_command {
__le32 len;
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
__le32 buf[0];
};
/**
* struct scm_legacy_response - one SCM response buffer
* @len: total available memory for response
* @buf_offset: start of response data relative to start of scm_legacy_response
* @is_complete: indicates if the command has finished processing
*/
struct scm_legacy_response {
__le32 len;
__le32 buf_offset;
__le32 is_complete;
};
/**
* scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
* @cmd: command
*
* Returns a pointer to a response for a command.
*/
static inline struct scm_legacy_response *scm_legacy_command_to_response(
const struct scm_legacy_command *cmd)
{
return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}
/**
* scm_legacy_get_command_buffer() - Get a pointer to a command buffer
* @cmd: command
*
* Returns a pointer to the command buffer of a command.
*/
static inline void *scm_legacy_get_command_buffer(
const struct scm_legacy_command *cmd)
{
return (void *)cmd->buf;
}
/**
* scm_legacy_get_response_buffer() - Get a pointer to a response buffer
* @rsp: response
*
* Returns a pointer to a response buffer of a response.
*/
static inline void *scm_legacy_get_response_buffer(
const struct scm_legacy_response *rsp)
{
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
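/*
 * Editorial sketch (not part of the commit): the accessors above are meant
 * to be used together, with all offsets read from the headers themselves so
 * that any padding the secure world inserts between regions is honoured:
 *
 *	struct scm_legacy_response *rsp = scm_legacy_command_to_response(cmd);
 *	__le32 *args = scm_legacy_get_command_buffer(cmd);
 *	const __le32 *rets = scm_legacy_get_response_buffer(rsp);
 */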
static void __scm_legacy_do(const struct arm_smccc_args *smc,
struct arm_smccc_res *res)
{
do {
arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
smc->args[3], smc->args[4], smc->args[5],
smc->args[6], smc->args[7], res);
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
/**
* qcom_scm_call() - Sends a command to the SCM and waits for the command to
* finish processing.
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
* must be flushed before invoking qcom_scm_call and invalidated in the cache
* immediately after qcom_scm_call returns. Cache maintenance on the command
* and response buffers is taken care of by qcom_scm_call; however, callers are
* responsible for any other cached buffers passed over to the secure world.
*/
int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
u8 arglen = desc->arginfo & 0xf;
int ret = 0, context_id;
unsigned int i;
struct scm_legacy_command *cmd;
struct scm_legacy_response *rsp;
struct arm_smccc_args smc = {0};
struct arm_smccc_res smc_res;
const size_t cmd_len = arglen * sizeof(__le32);
const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
dma_addr_t cmd_phys;
__le32 *arg_buf;
const __le32 *res_buf;
cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->len = cpu_to_le32(alloc_len);
cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
arg_buf = scm_legacy_get_command_buffer(cmd);
for (i = 0; i < arglen; i++)
arg_buf[i] = cpu_to_le32(desc->args[i]);
rsp = scm_legacy_command_to_response(cmd);
cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, cmd_phys)) {
kfree(cmd);
return -ENOMEM;
}
smc.args[0] = 1;
smc.args[1] = (unsigned long)&context_id;
smc.args[2] = cmd_phys;
mutex_lock(&qcom_scm_lock);
__scm_legacy_do(&smc, &smc_res);
if (smc_res.a0)
ret = qcom_scm_remap_error(smc_res.a0);
mutex_unlock(&qcom_scm_lock);
if (ret)
goto out;
do {
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
sizeof(*rsp), DMA_FROM_DEVICE);
} while (!rsp->is_complete);
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
le32_to_cpu(rsp->buf_offset),
resp_len, DMA_FROM_DEVICE);
if (res) {
res_buf = scm_legacy_get_response_buffer(rsp);
for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
res->result[i] = le32_to_cpu(res_buf[i]);
}
out:
dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
kfree(cmd);
return ret;
}
#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
#define SCM_LEGACY_MASK_IRQS BIT(5)
#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
((SCM_LEGACY_FNID(svc, cmd) << 12) | \
SCM_LEGACY_CLASS_REGISTER | \
SCM_LEGACY_MASK_IRQS | \
(n & 0xf))
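/*
 * Worked example (editorial note; assumes SCM_LEGACY_FNID() in qcom_scm.h
 * keeps the historical (((svc) & 0xff) << 10) | ((cmd) & 0x3ff) packing):
 * for svc 1, cmd 2 and one register argument,
 *
 *	SCM_LEGACY_FNID(1, 2)		= 0x402
 *	(0x402 << 12)			= 0x402000
 *	| SCM_LEGACY_CLASS_REGISTER	  (0x200)
 *	| SCM_LEGACY_MASK_IRQS		  (0x020)
 *	| 1 argument			= 0x402221
 */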
/**
* qcom_scm_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
*
* This shall only be used with commands that are guaranteed to be
* uninterruptable, atomic and SMP safe.
*/
int scm_legacy_call_atomic(struct device *unused,
const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
int context_id;
struct arm_smccc_res smc_res;
size_t arglen = desc->arginfo & 0xf;
BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
(unsigned long)&context_id,
desc->args[0], desc->args[1], desc->args[2],
desc->args[3], desc->args[4], 0, &smc_res);
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
res->result[2] = smc_res.a3;
}
return smc_res.a0;
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
/**
* struct arm_smccc_args
* @args: The array of values used in registers in smc instruction
*/
struct arm_smccc_args {
unsigned long args[8];
};
static DEFINE_MUTEX(qcom_scm_lock);
#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20
#define SCM_SMC_N_REG_ARGS 4
#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
#define SCM_SMC_FIRST_REG_IDX 2
#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
struct arm_smccc_res *res)
{
unsigned long a0 = smc->args[0];
struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
quirk.state.a6 = 0;
do {
arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
smc->args[3], smc->args[4], smc->args[5],
quirk.state.a6, smc->args[7], res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
a0 = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
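/*
 * Editorial note (not in the original source): the ARM_SMCCC_QUIRK_QCOM_A6
 * quirk makes the SMCCC helper write the a6 value returned by the secure
 * world back into quirk.state.a6. When a call returns QCOM_SCM_INTERRUPTED,
 * that value is a session token, and the loop above feeds it back in via
 * a6 so the interrupted call is resumed rather than restarted.
 */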
static void __scm_smc_do(const struct arm_smccc_args *smc,
struct arm_smccc_res *res, bool atomic)
{
int retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
return;
}
do {
mutex_lock(&qcom_scm_lock);
__scm_smc_do_quirk(smc, res);
mutex_unlock(&qcom_scm_lock);
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
int i;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
u32 qcom_smccc_convention =
(qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
struct arm_smccc_res smc_res;
struct arm_smccc_args smc = {0};
smc.args[0] = ARM_SMCCC_CALL_VAL(
smccc_call_type,
qcom_smccc_convention,
desc->owner,
SCM_SMC_FNID(desc->svc, desc->cmd));
smc.args[1] = desc->arginfo;
for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
__le32 *args = args_virt;
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
args[i] = cpu_to_le32(desc->args[i +
SCM_SMC_FIRST_EXT_IDX]);
} else {
__le64 *args = args_virt;
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
args[i] = cpu_to_le64(desc->args[i +
SCM_SMC_FIRST_EXT_IDX]);
}
args_phys = dma_map_single(dev, args_virt, alloc_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, args_phys)) {
kfree(args_virt);
return -ENOMEM;
}
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
__scm_smc_do(&smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
res->result[2] = smc_res.a3;
}
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
}
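/*
 * Usage sketch (editorial, not part of the commit): a minimal caller of
 * scm_smc_call(), mirroring how __query_convention() in qcom_scm.c further
 * down this diff queries the INFO service. The desc/res types come from
 * qcom_scm.h; error handling is trimmed.
 */
static int scm_smc_call_example(struct device *dev)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	/* Non-atomic: serialised against other standard calls, may sleep. */
	ret = scm_smc_call(dev, &desc, &res, false);

	return ret ? : res.result[0];
}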
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * Qualcomm SCM driver
- *
- * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
  */
 #include <linux/platform_device.h>
@@ -19,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
 
 #include "qcom_scm.h"
@@ -52,6 +50,35 @@ struct qcom_scm_mem_map_info {
 	__le64 mem_size;
 };
 
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
+
+struct qcom_scm_wb_entry {
+	int flag;
+	void *entry;
+};
+
+static struct qcom_scm_wb_entry qcom_scm_wb[] = {
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static const char *qcom_scm_convention_names[] = {
+	[SMC_CONVENTION_UNKNOWN] = "unknown",
+	[SMC_CONVENTION_ARM_32] = "smc arm 32",
+	[SMC_CONVENTION_ARM_64] = "smc arm 64",
+	[SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
 static struct qcom_scm *__scm;
 
 static int qcom_scm_clk_enable(void)
@@ -87,149 +114,308 @@ static void qcom_scm_clk_disable(void)
 	clk_disable_unprepare(__scm->bus_clk);
 }
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id);
+
+enum qcom_scm_convention qcom_scm_convention;
+static bool has_queried __read_mostly;
+static DEFINE_SPINLOCK(query_lock);
+
+static void __query_convention(void)
 {
-	return __qcom_scm_set_cold_boot_addr(entry, cpus);
+	unsigned long flags;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+					QCOM_SCM_INFO_IS_CALL_AVAIL) |
+			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	spin_lock_irqsave(&query_lock, flags);
+	if (has_queried)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_64;
+	// Device isn't required as there is only one argument - no device
+	// needed to dma_map_single to secure world
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_32;
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_LEGACY;
+out:
+	has_queried = true;
+	spin_unlock_irqrestore(&query_lock, flags);
+	pr_info("qcom_scm: convention: %s\n",
+		qcom_scm_convention_names[qcom_scm_convention]);
 }
-EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+static inline enum qcom_scm_convention __get_convention(void)
 {
-	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+	if (unlikely(!has_queried))
+		__query_convention();
+
+	return qcom_scm_convention;
 }
-EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 /**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
  *
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
  */
-void qcom_scm_cpu_power_down(u32 flags)
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+			 struct qcom_scm_res *res)
 {
-	__qcom_scm_cpu_power_down(flags);
+	might_sleep();
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, false);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
 }
-EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 
 /**
- * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
  *
- * Return true if HDCP is supported, false if not.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
  */
-bool qcom_scm_hdcp_available(void)
+static int qcom_scm_call_atomic(struct device *dev,
+				const struct qcom_scm_desc *desc,
+				struct qcom_scm_res *res)
 {
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, true);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call_atomic(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
 
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
-						QCOM_SCM_CMD_HDCP);
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+			       (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+		break;
+	case SMC_CONVENTION_LEGACY:
+		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+		break;
+	default:
+		pr_err("Unknown SMC convention being used\n");
+		return -EINVAL;
+	}
 
-	qcom_scm_clk_disable();
+	ret = qcom_scm_call(dev, &desc, &res);
 
-	return ret > 0 ? true : false;
+	return ret ? : res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_hdcp_available);
 
 /**
- * qcom_scm_hdcp_req() - Send HDCP request.
- * @req: HDCP request array
- * @req_cnt: HDCP request array count
- * @resp: response buffer passed to SCM
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
  *
- * Write HDCP register(s) through SCM.
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
  */
-int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	int ret = qcom_scm_clk_enable();
+	int ret;
+	int flags = 0;
+	int cpu;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+	};
 
-	if (ret)
-		return ret;
+	/*
+	 * Reassign only if we are switching from hotplug entry point
+	 * to cpuidle entry point or vice versa.
+	 */
+	for_each_cpu(cpu, cpus) {
+		if (entry == qcom_scm_wb[cpu].entry)
+			continue;
+
+		flags |= qcom_scm_wb[cpu].flag;
+	}
+
+	/* No change in entry function */
+	if (!flags)
+		return 0;
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+	if (!ret) {
+		for_each_cpu(cpu, cpus)
+			qcom_scm_wb[cpu].entry = entry;
+	}
 
-	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
-	qcom_scm_clk_disable();
 	return ret;
 }
-EXPORT_SYMBOL(qcom_scm_hdcp_req);
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
/**
* qcom_scm_pas_init_image() - Initialize peripheral authentication service
......@@ -248,6 +434,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
dma_addr_t mdata_phys;
void *mdata_buf;
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
/*
* During the scm call memory protection will be enabled for the meta
......@@ -266,14 +460,16 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
if (ret)
goto free_metadata;
	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
free_metadata:
dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
......@@ -289,15 +485,25 @@ EXPORT_SYMBOL(qcom_scm_pas_init_image);
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
.arginfo = QCOM_SCM_ARGS(3),
.args[0] = peripheral,
.args[1] = addr,
.args[2] = size,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
......@@ -311,15 +517,23 @@ EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
......@@ -332,18 +546,75 @@ EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
int qcom_scm_pas_shutdown(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
/**
* qcom_scm_pas_supported() - Check if the peripheral authentication service is
 * available for the given peripheral
* @peripheral: peripheral id
*
* Returns true if PAS is supported for this peripheral, otherwise false.
*/
bool qcom_scm_pas_supported(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PIL_PAS_IS_SUPPORTED);
if (ret <= 0)
return false;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);
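
/*
 * Editor's sketch, not from the patch: the order in which a remoteproc-style
 * loader is expected to drive the PAS helpers above. The function name and
 * the error policy are assumptions; firmware loading details are elided.
 */
static int example_boot_peripheral(u32 pas_id, const void *metadata,
				   size_t meta_size, phys_addr_t mem,
				   phys_addr_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(pas_id))
		return -EOPNOTSUPP;

	ret = qcom_scm_pas_init_image(pas_id, metadata, meta_size);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem, mem_size);
	if (ret)
		goto err_shutdown;

	return qcom_scm_pas_auth_and_reset(pas_id);

err_shutdown:
	qcom_scm_pas_shutdown(pas_id);
	return ret;
}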
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = reset,
.args[1] = 0,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? : res.result[0];
}
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
......@@ -367,6 +638,43 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.deassert = qcom_scm_pas_reset_deassert,
};
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_IO,
.cmd = QCOM_SCM_IO_READ,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = addr,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
if (ret >= 0)
*val = res.result[0];
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_IO,
.cmd = QCOM_SCM_IO_WRITE,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = addr,
.args[1] = val,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
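
/*
 * Editor's sketch (hypothetical helper, not in the patch): the firmware
 * interface only offers whole-register accesses, so a read-modify-write on
 * a secure-world-owned register is composed from the two helpers above.
 */
static int example_secure_rmw(phys_addr_t reg, unsigned int set_bits)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);
	if (ret)
		return ret;

	return qcom_scm_io_writel(reg, val | set_bits);
}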
/**
* qcom_scm_restore_sec_cfg_available() - Check if secure environment
* supports restore security config interface.
......@@ -376,108 +684,106 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
bool qcom_scm_restore_sec_cfg_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_ASSIGN,
.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
QCOM_SCM_VAL, QCOM_SCM_VAL),
.args[0] = mem_region,
.args[1] = mem_sz,
.args[2] = src,
.args[3] = src_sz,
.args[4] = dest,
.args[5] = dest_sz,
.args[6] = 0,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_call(dev, &desc, &res);
return ret ? : res.result[0];
}
/**
* qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
......@@ -561,6 +867,184 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
/**
* qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
*/
bool qcom_scm_ocmem_lock_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
/**
* qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
* region to the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
* @mode: access mode (WIDE/NARROW)
*/
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
u32 mode)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_OCMEM,
.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
.args[0] = id,
.args[1] = offset,
.args[2] = size,
.args[3] = mode,
.arginfo = QCOM_SCM_ARGS(4),
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);
/**
* qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
* region from the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
*/
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_OCMEM,
.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
.args[0] = id,
.args[1] = offset,
.args[2] = size,
.arginfo = QCOM_SCM_ARGS(3),
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
* Return true if HDCP is supported, false if not.
*/
bool qcom_scm_hdcp_available(void)
{
int ret = qcom_scm_clk_enable();
if (ret)
return ret;
ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
QCOM_SCM_HDCP_INVOKE);
qcom_scm_clk_disable();
	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
/**
* qcom_scm_hdcp_req() - Send HDCP request.
* @req: HDCP request array
* @req_cnt: HDCP request array count
* @resp: response buffer passed to SCM
*
* Write HDCP register(s) through SCM.
*/
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_HDCP,
.cmd = QCOM_SCM_HDCP_INVOKE,
.arginfo = QCOM_SCM_ARGS(10),
.args = {
req[0].addr,
req[0].val,
req[1].addr,
req[1].val,
req[2].addr,
req[2].val,
req[3].addr,
req[3].val,
req[4].addr,
req[4].val
},
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
*resp = res.result[0];
qcom_scm_clk_disable();
return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
.args[1] = en,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
struct device_node *np = dev->of_node;
struct resource res;
u32 offset;
int ret;
tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
if (!tcsr)
return 0;
ret = of_address_to_resource(tcsr, 0, &res);
of_node_put(tcsr);
if (ret)
return ret;
ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
if (ret < 0)
return ret;
*addr = res.start + offset;
return 0;
}
/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
{
return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
......@@ -631,7 +1115,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
	__query_convention();
/*
* If requested enable "download mode", from this point on warmboot
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_SCM_INT_H
#define __QCOM_SCM_INT_H
enum qcom_scm_convention {
SMC_CONVENTION_UNKNOWN,
SMC_CONVENTION_LEGACY,
SMC_CONVENTION_ARM_32,
SMC_CONVENTION_ARM_64,
};
extern enum qcom_scm_convention qcom_scm_convention;
#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3
enum qcom_scm_arg_types {
QCOM_SCM_VAL,
QCOM_SCM_RO,
QCOM_SCM_RW,
QCOM_SCM_BUFVAL,
};
#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
(((a) & 0x3) << 4) | \
(((b) & 0x3) << 6) | \
(((c) & 0x3) << 8) | \
(((d) & 0x3) << 10) | \
(((e) & 0x3) << 12) | \
(((f) & 0x3) << 14) | \
(((g) & 0x3) << 16) | \
(((h) & 0x3) << 18) | \
(((i) & 0x3) << 20) | \
(((j) & 0x3) << 22) | \
((num) & 0xf))
#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
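
/*
 * Worked expansion, editor's illustration: each argument type takes two bits
 * starting at bit 4, with the argument count in the low nibble. Unlisted
 * types default to 0 (QCOM_SCM_VAL), so QCOM_SCM_ARGS(2, QCOM_SCM_RO)
 * becomes:
 *
 *   ((QCOM_SCM_RO & 0x3) << 4) | ((QCOM_SCM_VAL & 0x3) << 6) | (2 & 0xf)
 * = (1 << 4) | (0 << 6) | 2
 * = 0x12
 */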
/**
* struct qcom_scm_desc
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
*/
struct qcom_scm_desc {
u32 svc;
u32 cmd;
u32 arginfo;
u64 args[MAX_QCOM_SCM_ARGS];
u32 owner;
};
/**
* struct qcom_scm_res
* @result: The values returned by the secure syscall
*/
struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic);
#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
extern int scm_legacy_call_atomic(struct device *dev,
const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
#define QCOM_SCM_SVC_PIL 0x02
#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01
#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02
#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05
#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06
#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07
#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a
#define QCOM_SCM_SVC_IO 0x05
#define QCOM_SCM_IO_READ 0x01
#define QCOM_SCM_IO_WRITE 0x02
#define QCOM_SCM_SVC_INFO 0x06
#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
#define QCOM_SCM_SVC_MP 0x0c
#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
#define QCOM_SCM_MP_ASSIGN 0x16
#define QCOM_SCM_SVC_OCMEM 0x0f
#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE		0x01
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
......@@ -94,25 +139,4 @@ static inline int qcom_scm_remap_error(int err)
return -EINVAL;
}
#endif
......@@ -45,13 +45,13 @@ config QCOM_GLINK_SSR
neighboring subsystems going up or down.
config QCOM_GSBI
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
Say y here to enable GSBI support. The GSBI provides control
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
Say y here to enable GSBI support. The GSBI provides control
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
config QCOM_LLCC
tristate "Qualcomm Technologies, Inc. LLCC driver"
......@@ -71,10 +71,10 @@ config QCOM_OCMEM
depends on ARCH_QCOM
select QCOM_SCM
help
	  The On Chip Memory (OCMEM) allocator allows various clients to
	  allocate memory from OCMEM based on performance, latency and power
	  requirements. This is typically used by the GPU, camera/video, and
	  audio components on some Snapdragon SoCs.
config QCOM_PM
bool "Qualcomm Power Management"
......@@ -198,8 +198,8 @@ config QCOM_APR
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
help
	  Enable APR IPC protocol support between
	  application processor and QDSP6. APR is
	  used by audio driver to configure QDSP6
	  ASM, ADM and AFE modules.
endmenu
......@@ -655,8 +655,12 @@ int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
qmi->sock = qmi_sock_create(qmi, &qmi->sq);
if (IS_ERR(qmi->sock)) {
pr_err("failed to create QMI socket\n");
ret = PTR_ERR(qmi->sock);
if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
ret = -EPROBE_DEFER;
} else {
pr_err("failed to create QMI socket\n");
ret = PTR_ERR(qmi->sock);
}
goto err_destroy_wq;
}
......
......@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
static struct rpmhpd sdm845_mx_ao = {
.pd = { .name = "mx_ao", },
.active_only = true,
.peer = &sdm845_mx,
.res_name = "mx.lvl",
};
......@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
static struct rpmhpd sdm845_cx_ao = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &sdm845_cx,
.parent = &sdm845_mx_ao.pd,
.res_name = "cx.lvl",
......@@ -129,8 +131,62 @@ static const struct rpmhpd_desc sdm845_desc = {
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
static struct rpmhpd sm8150_mmcx = {
.pd = { .name = "mmcx", },
.peer = &sm8150_mmcx_ao,
.res_name = "mmcx.lvl",
};
static struct rpmhpd sm8150_mmcx_ao = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &sm8150_mmcx,
.res_name = "mmcx.lvl",
};
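
/*
 * Editor's note: both domains vote on the same RPMh resource ("mmcx.lvl").
 * The _ao peer is flagged active_only, so its requests apply only while the
 * application processor is awake and are dropped when it sleeps; the peer
 * pointers let the driver aggregate the two votes on the shared resource.
 */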
static struct rpmhpd *sm8150_rpmhpds[] = {
[SM8150_MSS] = &sdm845_mss,
[SM8150_EBI] = &sdm845_ebi,
[SM8150_LMX] = &sdm845_lmx,
[SM8150_LCX] = &sdm845_lcx,
[SM8150_GFX] = &sdm845_gfx,
[SM8150_MX] = &sdm845_mx,
[SM8150_MX_AO] = &sdm845_mx_ao,
[SM8150_CX] = &sdm845_cx,
[SM8150_CX_AO] = &sdm845_cx_ao,
[SM8150_MMCX] = &sm8150_mmcx,
[SM8150_MMCX_AO] = &sm8150_mmcx_ao,
};
static const struct rpmhpd_desc sm8150_desc = {
.rpmhpds = sm8150_rpmhpds,
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &sdm845_cx,
[SC7180_CX_AO] = &sdm845_cx_ao,
[SC7180_GFX] = &sdm845_gfx,
[SC7180_MX] = &sdm845_mx,
[SC7180_MX_AO] = &sdm845_mx_ao,
[SC7180_LMX] = &sdm845_lmx,
[SC7180_LCX] = &sdm845_lcx,
[SC7180_MSS] = &sdm845_mss,
};
static const struct rpmhpd_desc sc7180_desc = {
.rpmhpds = sc7180_rpmhpds,
.num_pds = ARRAY_SIZE(sc7180_rpmhpds),
};
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ }
};
......
......@@ -15,12 +15,36 @@
#define SDM845_GFX 7
#define SDM845_MSS 8
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
#define SM8150_LMX 2
#define SM8150_LCX 3
#define SM8150_GFX 4
#define SM8150_MX 5
#define SM8150_MX_AO 6
#define SM8150_CX 7
#define SM8150_CX_AO 8
#define SM8150_MMCX 9
#define SM8150_MMCX_AO 10
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
#define SC7180_GFX 2
#define SC7180_MX 3
#define SC7180_MX_AO 4
#define SC7180_LMX 5
#define SC7180_LCX 6
#define SC7180_MSS 7
/* SDM845 Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
#define RPMH_REGULATOR_LEVEL_SVS 128
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
#define RPMH_REGULATOR_LEVEL_SVS_L2 224
#define RPMH_REGULATOR_LEVEL_NOM 256
#define RPMH_REGULATOR_LEVEL_NOM_L1 320
#define RPMH_REGULATOR_LEVEL_NOM_L2 336
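
/*
 * Editor's sketch (not part of the patch): consumers turn these level
 * constants into genpd performance-state votes at runtime. Assumes the
 * device has a single attached power domain; the function name is
 * hypothetical.
 */
#include <linux/pm_domain.h>

static int example_vote_nominal(struct device *dev)
{
	return dev_pm_genpd_set_performance_state(dev,
						  RPMH_REGULATOR_LEVEL_NOM);
}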
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#ifndef __QCOM_SCM_H
......@@ -55,77 +55,94 @@ enum qcom_scm_sec_dev_id {
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
#if IS_ENABLED(CONFIG_QCOM_SCM)
extern bool qcom_scm_is_available(void);

extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
extern void qcom_scm_cpu_power_down(u32 flags);
extern int qcom_scm_set_remote_state(u32 state, u32 id);

extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
				   size_t size);
extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
				  phys_addr_t size);
extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
extern int qcom_scm_pas_shutdown(u32 peripheral);
extern bool qcom_scm_pas_supported(u32 peripheral);

extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);

extern bool qcom_scm_restore_sec_cfg_available(void);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			       unsigned int *src,
			       const struct qcom_scm_vmperm *newvm,
			       unsigned int dest_cnt);

extern bool qcom_scm_ocmem_lock_available(void);
extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
			       u32 size, u32 mode);
extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
				 u32 size);

extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
			     u32 *resp);

extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
#else
#include <linux/errno.h>
static inline bool qcom_scm_is_available(void) { return false; }

static inline int qcom_scm_set_cold_boot_addr(void *entry,
		const cpumask_t *cpus) { return -ENODEV; }
static inline int qcom_scm_set_warm_boot_addr(void *entry,
		const cpumask_t *cpus) { return -ENODEV; }
static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_set_remote_state(u32 state, u32 id)
		{ return -ENODEV; }

static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
		size_t size) { return -ENODEV; }
static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
		phys_addr_t size) { return -ENODEV; }
static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
		{ return -ENODEV; }
static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }

static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
		{ return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
		{ return -ENODEV; }

static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
		{ return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
		{ return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
		{ return -ENODEV; }
static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
		unsigned int *src, const struct qcom_scm_vmperm *newvm,
		unsigned int dest_cnt) { return -ENODEV; }

static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
		u32 size, u32 mode) { return -ENODEV; }
static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
		u32 offset, u32 size) { return -ENODEV; }

static inline bool qcom_scm_hdcp_available(void) { return false; }
static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
		u32 *resp) { return -ENODEV; }

static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
		{ return -ENODEV; }
#endif
#endif