Commit 8a36c27d authored by Arnd Bergmann

Merge tag 'qcom-drivers-for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux into next/drivers

Pull "Qualcomm ARM Based Driver Updates for v4.15" from Andy Gross:

* Add SCM firmware APIs for download mode and secure IO service
* Add SMEM support for cached entries
* Add SMEM support for global partition, dynamic item limit, and more hosts

* tag 'qcom-drivers-for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux:
  firmware: qcom: scm: Expose download-mode control
  firmware: qcom: scm: Expose secure IO service
  soc: qcom: smem: Increase the number of hosts
  soc: qcom: smem: Support dynamic item limit
  soc: qcom: smem: Support global partition
  soc: qcom: smem: Read version from the smem header
  soc: qcom: smem: Use le32_to_cpu for comparison
  soc: qcom: smem: Support getting cached entries
  soc: qcom: smem: Rename "uncached" accessors
parents ea9e3fbc 8c1b7dc9
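
Before the diffs, a consumer's-eye sketch of the two new secure IO exports. This is a hedged illustration, not part of the merge: the signatures come from the include/linux/qcom_scm.h hunk at the bottom, while the register address and bit are made-up placeholders.

	#include <linux/qcom_scm.h>

	/* Hedged example: read-modify-write of a secured register through the
	 * new SCM IO service; both calls return 0 or a negative errno. */
	static int example_toggle_secure_bit(void)
	{
		phys_addr_t reg = 0x01fd3000;	/* placeholder register address */
		unsigned int val;
		int ret;

		ret = qcom_scm_io_readl(reg, &val);
		if (ret)
			return ret;

		return qcom_scm_io_writel(reg, val | 0x1);
	}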
Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -18,6 +18,8 @@ Required properties:
  * Core, iface, and bus clocks required for "qcom,scm"
 - clock-names: Must contain "core" for the core clock, "iface" for the interface
   clock and "bus" for the bus clock per the requirements of the compatible.
+- qcom,dload-mode: phandle to the TCSR hardware block and offset of the
+		   download mode control register (optional)

 Example for MSM8916:
......
drivers/firmware/Kconfig
@@ -215,6 +215,17 @@ config QCOM_SCM_64
 	def_bool y
 	depends on QCOM_SCM && ARM64

+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+	bool "Qualcomm download mode enabled by default"
+	depends on QCOM_SCM
+	help
+	  A device with "download mode" enabled will upon an unexpected
+	  warm-restart enter a special debug mode that allows the user to
+	  "download" memory content over USB for offline postmortem analysis.
+	  The feature can be enabled/disabled on the kernel command line.
+
+	  Say Y here to enable "download mode" by default.
+
 config TI_SCI_PROTOCOL
 	tristate "TI System Control Interface (TISCI) Message Protocol"
 	depends on TI_MESSAGE_MANAGER
......
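
The help text above pairs with a module parameter declared in the qcom_scm.c hunk further down; a hedged, self-contained illustration of the pattern (the command-line name is inferred from the object name and should be treated as an assumption):

	#include <linux/moduleparam.h>

	/* Built-in default comes from the Kconfig symbol above; because the
	 * driver is built-in, the flag surfaces on the kernel command line
	 * as qcom_scm.download_mode=0/1 (prefix assumed from KBUILD_MODNAME). */
	static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
	module_param(download_mode, bool, 0);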
drivers/firmware/qcom_scm-32.c
@@ -561,6 +561,12 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
 	return ret ? : le32_to_cpu(out);
 }

+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+				     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
+}
+
 int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
 {
 	struct {
@@ -596,3 +602,21 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
 {
 	return -ENODEV;
 }
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+			unsigned int *val)
+{
+	int ret;
+
+	ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
+	if (ret >= 0)
+		*val = ret;
+
+	return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+				     addr, val);
+}
drivers/firmware/qcom_scm-64.c
@@ -439,3 +439,47 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
 	return ret;
 }
+
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
+	desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+			     &desc, &res);
+}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+			unsigned int *val)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = addr;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
+			    &desc, &res);
+	if (ret >= 0)
+		*val = res.a1;
+
+	return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = addr;
+	desc.args[1] = val;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+			     &desc, &res);
+}
drivers/firmware/qcom_scm.c
@@ -19,15 +19,20 @@
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/dma-mapping.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/qcom_scm.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/reset-controller.h>

 #include "qcom_scm.h"

+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
 #define SCM_HAS_CORE_CLK	BIT(0)
 #define SCM_HAS_IFACE_CLK	BIT(1)
 #define SCM_HAS_BUS_CLK		BIT(2)
@@ -38,6 +43,8 @@ struct qcom_scm {
 	struct clk *iface_clk;
 	struct clk *bus_clk;
 	struct reset_controller_dev reset;
+	u64 dload_mode_addr;
 };

 static struct qcom_scm *__scm;

@@ -333,6 +340,66 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 }
 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+	return __qcom_scm_io_readl(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+	return __qcom_scm_io_writel(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+	bool avail;
+	int ret = 0;
+
+	avail = __qcom_scm_is_call_available(__scm->dev,
+					     QCOM_SCM_SVC_BOOT,
+					     QCOM_SCM_SET_DLOAD_MODE);
+	if (avail) {
+		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+	} else if (__scm->dload_mode_addr) {
+		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
+					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
+	} else {
+		dev_err(__scm->dev,
+			"No available mechanism for setting download mode\n");
+	}
+
+	if (ret)
+		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+	struct device_node *tcsr;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	u32 offset;
+	int ret;
+
+	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+	if (!tcsr)
+		return 0;
+
+	ret = of_address_to_resource(tcsr, 0, &res);
+	of_node_put(tcsr);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+	if (ret < 0)
+		return ret;
+
+	*addr = res.start + offset;
+
+	return 0;
+}
+
 /**
  * qcom_scm_is_available() - Checks if SCM is available
  */
@@ -358,6 +425,10 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	if (!scm)
 		return -ENOMEM;

+	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+	if (ret < 0)
+		return ret;
+
 	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
 	if (clks & SCM_HAS_CORE_CLK) {
 		scm->core_clk = devm_clk_get(&pdev->dev, "core");
@@ -406,9 +477,24 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	__qcom_scm_init();
+
+	/*
+	 * If requested enable "download mode", from this point on warmboot
+	 * will cause the boot stages to enter download mode, unless
+	 * disabled below by a clean shutdown/reboot.
+	 */
+	if (download_mode)
+		qcom_scm_set_download_mode(true);
+
 	return 0;
 }

+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+	/* Clean shutdown, disable download mode to allow normal restart */
+	if (download_mode)
+		qcom_scm_set_download_mode(false);
+}
+
 static const struct of_device_id qcom_scm_dt_match[] = {
 	{ .compatible = "qcom,scm-apq8064",
 	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
@@ -436,6 +522,7 @@ static struct platform_driver qcom_scm_driver = {
 		.of_match_table = qcom_scm_dt_match,
 	},
 	.probe = qcom_scm_probe,
+	.shutdown = qcom_scm_shutdown,
 };

 static int __init qcom_scm_init(void)
......
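
For reference, qcom_scm_find_dload_address() above consumes the two-cell "qcom,dload-mode" property documented in the binding hunk at the top: a phandle to the TCSR block followed by a register offset, e.g. qcom,dload-mode = <&tcsr 0x13000> (the offset is a placeholder). A hedged, condensed sketch of that parsing idiom, with illustrative names:

	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Hedged sketch: resolve "<phandle offset>" to a physical address,
	 * mirroring qcom_scm_find_dload_address() in the hunk above. */
	static int example_resolve_reg_ref(struct device_node *np, u64 *addr)
	{
		struct device_node *target;
		struct resource res;
		u32 offset;
		int ret;

		target = of_parse_phandle(np, "qcom,dload-mode", 0);
		if (!target)
			return -ENOENT;		/* property absent */

		ret = of_address_to_resource(target, 0, &res);
		of_node_put(target);
		if (ret)
			return ret;

		/* cell 1 holds the offset into the referenced block */
		ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
		if (ret < 0)
			return ret;

		*addr = res.start + offset;
		return 0;
	}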
drivers/firmware/qcom_scm.h
@@ -14,9 +14,11 @@
 #define QCOM_SCM_SVC_BOOT		0x1
 #define QCOM_SCM_BOOT_ADDR		0x1
+#define QCOM_SCM_SET_DLOAD_MODE		0x10
 #define QCOM_SCM_BOOT_ADDR_MC		0x11
 #define QCOM_SCM_SET_REMOTE_STATE	0xa
 extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
+extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);

 #define QCOM_SCM_FLAG_HLOS		0x01
 #define QCOM_SCM_FLAG_COLDBOOT_MC	0x02
@@ -30,6 +32,12 @@ extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
 #define QCOM_SCM_CMD_CORE_HOTPLUGGED	0x10
 extern void __qcom_scm_cpu_power_down(u32 flags);

+#define QCOM_SCM_SVC_IO			0x5
+#define QCOM_SCM_IO_READ		0x1
+#define QCOM_SCM_IO_WRITE		0x2
+extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
+extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+
 #define QCOM_SCM_SVC_INFO		0x6
 #define QCOM_IS_CALL_AVAIL_CMD		0x1
 extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
......
drivers/soc/qcom/smem.c
@@ -52,8 +52,13 @@
  *
  * Items in the non-cached region are allocated from the start of the partition
  * while items in the cached region are allocated from the end. The free area
- * is hence the region between the cached and non-cached offsets.
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
  *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
  *
  * To synchronize allocations in the shared memory heaps a remote spinlock must
  * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
@@ -62,13 +67,13 @@
  */

 /*
- * Item 3 of the global heap contains an array of versions for the various
- * software components in the SoC. We verify that the boot loader version is
- * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
  */
-#define SMEM_ITEM_VERSION	3
-#define SMEM_MASTER_SBL_VERSION_INDEX	7
-#define SMEM_EXPECTED_VERSION	11
+#define SMEM_MASTER_SBL_VERSION_INDEX	7
+#define SMEM_GLOBAL_HEAP_VERSION	11
+#define SMEM_GLOBAL_PART_VERSION	12

 /*
  * The first 8 items are only to be allocated by the boot loader while
@@ -82,8 +87,11 @@
 /* Processor/host identifier for the application processor */
 #define SMEM_HOST_APPS		0

+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST	0xfffe
+
 /* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT		9
+#define SMEM_HOST_COUNT		10

 /**
  * struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -140,6 +148,7 @@ struct smem_header {
  * @flags: flags for the partition (currently unused)
  * @host0: first processor/host with access to this partition
  * @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
  * @reserved: reserved entries for later use
  */
 struct smem_ptable_entry {
@@ -148,7 +157,8 @@ struct smem_ptable_entry {
 	__le32 flags;
 	__le16 host0;
 	__le16 host1;
-	__le32 reserved[8];
+	__le32 cacheline;
+	__le32 reserved[7];
 };

 /**
@@ -212,6 +222,24 @@ struct smem_private_entry {
 };
 #define SMEM_PRIVATE_CANARY	0xa5a5

+/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic:	magic number, must be SMEM_INFO_MAGIC
+ * @size:	size of the smem region
+ * @base_addr:	base address of the smem region
+ * @reserved:	for now reserved entry
+ * @num_items:	highest accepted item number
+ */
+struct smem_info {
+	u8 magic[4];
+	__le32 size;
+	__le32 base_addr;
+	__le32 reserved;
+	__le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
 /**
  * struct smem_region - representation of a chunk of memory used for smem
  * @aux_base:	identifier of aux_mem base
@@ -228,8 +256,12 @@ struct smem_region {
  * struct qcom_smem - device data for the smem device
  * @dev:	device pointer
  * @hwlock:	reference to a hwspinlock
+ * @global_partition:	pointer to global partition when in use
+ * @global_cacheline:	cacheline size for global partition
  * @partitions:	list of pointers to partitions affecting the current
  *		processor/host
+ * @cacheline:	list of cacheline sizes for each host
+ * @item_count:	max accepted item number
  * @num_regions: number of @regions
  * @regions:	list of the memory regions defining the shared memory
  */
@@ -238,21 +270,33 @@ struct qcom_smem {
 	struct hwspinlock *hwlock;

+	struct smem_partition_header *global_partition;
+	size_t global_cacheline;
 	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+	size_t cacheline[SMEM_HOST_COUNT];
+	u32 item_count;

 	unsigned num_regions;
 	struct smem_region regions[0];
 };
 static struct smem_private_entry *
-phdr_to_last_private_entry(struct smem_partition_header *phdr)
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
 {
 	void *p = phdr;

 	return p + le32_to_cpu(phdr->offset_free_uncached);
 }

-static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+					size_t cacheline)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
+}
+
+static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
 {
 	void *p = phdr;

@@ -260,7 +304,7 @@ static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
 }

 static struct smem_private_entry *
-phdr_to_first_private_entry(struct smem_partition_header *phdr)
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
 {
 	void *p = phdr;

@@ -268,7 +312,7 @@ phdr_to_first_private_entry(struct smem_partition_header *phdr)
 }

 static struct smem_private_entry *
-private_entry_next(struct smem_private_entry *e)
+uncached_entry_next(struct smem_private_entry *e)
 {
 	void *p = e;

@@ -276,13 +320,28 @@ private_entry_next(struct smem_private_entry *e)
 		le32_to_cpu(e->size);
 }

-static void *entry_to_item(struct smem_private_entry *e)
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
 {
 	void *p = e;

 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 }

+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size);
+}
+
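
Taken together, the new accessors encode the cached-entry geometry: the list grows downward from the end of the partition and each entry's header sits above its data. A hedged sketch built on the helpers above (the counting function itself is illustrative, not from the patch):

	/* Walk the cached list and count entries; mirrors the traversal that
	 * qcom_smem_get_private() gains further down. */
	static int example_count_cached(struct smem_partition_header *phdr,
					size_t cacheline)
	{
		struct smem_private_entry *e;
		void *end = phdr_to_last_cached_entry(phdr);
		int n = 0;

		for (e = phdr_to_first_cached_entry(phdr, cacheline);
		     (void *)e > end;
		     e = cached_entry_next(e, cacheline)) {
			if (e->canary != SMEM_PRIVATE_CANARY)
				return -EINVAL;	/* corrupted list */
			n++;
		}

		return n;
	}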
 /* Pointer to the one and only smem handle */
 static struct qcom_smem *__smem;

@@ -290,32 +349,30 @@ static struct qcom_smem *__smem;
 #define HWSPINLOCK_TIMEOUT	1000
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_partition_header *phdr,
 				   unsigned item,
 				   size_t size)
 {
-	struct smem_partition_header *phdr;
 	struct smem_private_entry *hdr, *end;
 	size_t alloc_size;
 	void *cached;

-	phdr = smem->partitions[host];
-
-	hdr = phdr_to_first_private_entry(phdr);
-	end = phdr_to_last_private_entry(phdr);
-	cached = phdr_to_first_cached_entry(phdr);
+	hdr = phdr_to_first_uncached_entry(phdr);
+	end = phdr_to_last_uncached_entry(phdr);
+	cached = phdr_to_last_cached_entry(phdr);

 	while (hdr < end) {
 		if (hdr->canary != SMEM_PRIVATE_CANARY) {
 			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
+				"Found invalid canary in hosts %d:%d partition\n",
+				phdr->host0, phdr->host1);
 			return -EINVAL;
 		}

 		if (le16_to_cpu(hdr->item) == item)
 			return -EEXIST;

-		hdr = private_entry_next(hdr);
+		hdr = uncached_entry_next(hdr);
 	}

 	/* Check that we don't grow into the cached region */
@@ -346,11 +403,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
 				  unsigned item,
 				  size_t size)
 {
-	struct smem_header *header;
 	struct smem_global_entry *entry;
+	struct smem_header *header;

-	if (WARN_ON(item >= SMEM_ITEM_COUNT))
-		return -EINVAL;
-
 	header = smem->regions[0].virt_base;
 	entry = &header->toc[item];
@@ -389,6 +443,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
  */
 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
+	struct smem_partition_header *phdr;
 	unsigned long flags;
 	int ret;
@@ -401,16 +456,24 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 		return -EINVAL;
 	}

+	if (WARN_ON(item >= __smem->item_count))
+		return -EINVAL;
+
 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
 					  HWSPINLOCK_TIMEOUT,
 					  &flags);
 	if (ret)
 		return ret;

-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ret = qcom_smem_alloc_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else {
 		ret = qcom_smem_alloc_global(__smem, item, size);
+	}

 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -428,9 +491,6 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 	u32 aux_base;
 	unsigned i;

-	if (WARN_ON(item >= SMEM_ITEM_COUNT))
-		return ERR_PTR(-EINVAL);
-
 	header = smem->regions[0].virt_base;
 	entry = &header->toc[item];
 	if (!entry->allocated)
@@ -452,37 +512,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 }

 static void *qcom_smem_get_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_partition_header *phdr,
+				   size_t cacheline,
 				   unsigned item,
 				   size_t *size)
 {
-	struct smem_partition_header *phdr;
 	struct smem_private_entry *e, *end;

-	phdr = smem->partitions[host];
+	e = phdr_to_first_uncached_entry(phdr);
+	end = phdr_to_last_uncached_entry(phdr);

-	e = phdr_to_first_private_entry(phdr);
-	end = phdr_to_last_private_entry(phdr);
+	while (e < end) {
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;

-	while (e < end) {
-		if (e->canary != SMEM_PRIVATE_CANARY) {
-			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
-			return ERR_PTR(-EINVAL);
+		if (le16_to_cpu(e->item) == item) {
+			if (size != NULL)
+				*size = le32_to_cpu(e->size) -
+					le16_to_cpu(e->padding_data);
+
+			return uncached_entry_to_item(e);
 		}

+		e = uncached_entry_next(e);
+	}
+
+	/* Item was not found in the uncached list, search the cached list */
+
+	e = phdr_to_first_cached_entry(phdr, cacheline);
+	end = phdr_to_last_cached_entry(phdr);
+
+	while (e > end) {
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;
+
 		if (le16_to_cpu(e->item) == item) {
 			if (size != NULL)
 				*size = le32_to_cpu(e->size) -
 					le16_to_cpu(e->padding_data);

-			return entry_to_item(e);
+			return cached_entry_to_item(e);
 		}

-		e = private_entry_next(e);
+		e = cached_entry_next(e, cacheline);
 	}

 	return ERR_PTR(-ENOENT);
+
+invalid_canary:
+	dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
+			phdr->host0, phdr->host1);
+
+	return ERR_PTR(-EINVAL);
 }

 /**
@@ -496,23 +577,35 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
  */
 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 {
+	struct smem_partition_header *phdr;
 	unsigned long flags;
+	size_t cacheln;
 	int ret;
 	void *ptr = ERR_PTR(-EPROBE_DEFER);

 	if (!__smem)
 		return ptr;

+	if (WARN_ON(item >= __smem->item_count))
+		return ERR_PTR(-EINVAL);
+
 	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
 					  HWSPINLOCK_TIMEOUT,
 					  &flags);
 	if (ret)
 		return ERR_PTR(ret);

-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ptr = qcom_smem_get_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		cacheln = __smem->cacheline[host];
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		cacheln = __smem->global_cacheline;
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else {
 		ptr = qcom_smem_get_global(__smem, item, size);
+	}

 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -541,6 +634,10 @@ int qcom_smem_get_free_space(unsigned host)
 		phdr = __smem->partitions[host];
 		ret = le32_to_cpu(phdr->offset_free_cached) -
 		      le32_to_cpu(phdr->offset_free_uncached);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = le32_to_cpu(phdr->offset_free_cached) -
+		      le32_to_cpu(phdr->offset_free_uncached);
 	} else {
 		header = __smem->regions[0].virt_base;
 		ret = le32_to_cpu(header->available);
@@ -552,44 +649,131 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);

 static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
 {
+	struct smem_header *header;
 	__le32 *versions;
-	size_t size;

-	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
-	if (IS_ERR(versions)) {
-		dev_err(smem->dev, "Unable to read the version item\n");
-		return -ENOENT;
-	}
-
-	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
-		dev_err(smem->dev, "Version item is too small\n");
-		return -EINVAL;
-	}
+	header = smem->regions[0].virt_base;
+	versions = header->version;

 	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
 }

-static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
-					  unsigned local_host)
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
 {
-	struct smem_partition_header *header;
-	struct smem_ptable_entry *entry;
 	struct smem_ptable *ptable;
-	unsigned remote_host;
-	u32 version, host0, host1;
-	int i;
+	u32 version;

 	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
 	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
-		return 0;
+		return ERR_PTR(-ENOENT);

 	version = le32_to_cpu(ptable->version);
 	if (version != 1) {
 		dev_err(smem->dev,
 			"Unsupported partition header version %d\n", version);
+		return ERR_PTR(-EINVAL);
+	}
+	return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+	struct smem_ptable *ptable;
+	struct smem_info *info;
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR_OR_NULL(ptable))
+		return SMEM_ITEM_COUNT;
+
+	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+		return SMEM_ITEM_COUNT;
+
+	return le16_to_cpu(info->num_items);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry = NULL;
+	struct smem_ptable *ptable;
+	u32 host0, host1, size;
+	int i;
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR(ptable))
+		return PTR_ERR(ptable);
+
+	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+		entry = &ptable->entry[i];
+		host0 = le16_to_cpu(entry->host0);
+		host1 = le16_to_cpu(entry->host1);
+
+		if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
+			break;
+	}
+
+	if (!entry) {
+		dev_err(smem->dev, "Missing entry for global partition\n");
+		return -EINVAL;
+	}
+
+	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+		dev_err(smem->dev, "Invalid entry for global partition\n");
+		return -EINVAL;
+	}
+
+	if (smem->global_partition) {
+		dev_err(smem->dev, "Already found the global partition\n");
 		return -EINVAL;
 	}

+	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+	host0 = le16_to_cpu(header->host0);
+	host1 = le16_to_cpu(header->host1);
+
+	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+		dev_err(smem->dev, "Global partition has invalid magic\n");
+		return -EINVAL;
+	}
+
+	if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+		dev_err(smem->dev, "Global partition hosts are invalid\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+		dev_err(smem->dev, "Global partition has invalid size\n");
+		return -EINVAL;
+	}
+
+	size = le32_to_cpu(header->offset_free_uncached);
+	if (size > le32_to_cpu(header->size)) {
+		dev_err(smem->dev,
+			"Global partition has invalid free pointer\n");
+		return -EINVAL;
+	}
+
+	smem->global_partition = header;
+	smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+	return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+					  unsigned int local_host)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	unsigned int remote_host;
+	u32 host0, host1;
+	int i;
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR(ptable))
+		return PTR_ERR(ptable);
+
 	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
 		entry = &ptable->entry[i];
 		host0 = le16_to_cpu(entry->host0);
@@ -646,7 +830,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 			return -EINVAL;
 		}

-		if (header->size != entry->size) {
+		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
 			dev_err(smem->dev,
 				"Partition %d has invalid size\n", i);
 			return -EINVAL;
@@ -659,6 +843,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 		}

 		smem->partitions[remote_host] = header;
+		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
 	}

 	return 0;
@@ -729,13 +914,23 @@ static int qcom_smem_probe(struct platform_device *pdev)
 	}

 	version = qcom_smem_get_sbl_version(smem);
-	if (version >> 16 != SMEM_EXPECTED_VERSION) {
+	switch (version >> 16) {
+	case SMEM_GLOBAL_PART_VERSION:
+		ret = qcom_smem_set_global_partition(smem);
+		if (ret < 0)
+			return ret;
+		smem->item_count = qcom_smem_get_item_count(smem);
+		break;
+	case SMEM_GLOBAL_HEAP_VERSION:
+		smem->item_count = SMEM_ITEM_COUNT;
+		break;
+	default:
 		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
 		return -EINVAL;
 	}

 	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
-	if (ret < 0)
+	if (ret < 0 && ret != -ENOENT)
 		return ret;

 	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
......
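
End to end, the consumer-facing behavior after this series, as a hedged sketch (host and item numbers are placeholders; qcom_smem_alloc() and qcom_smem_get() are the existing public API from linux/soc/qcom/smem.h):

	#include <linux/err.h>
	#include <linux/soc/qcom/smem.h>

	#define EXAMPLE_HOST	1	/* placeholder remote-processor id */
	#define EXAMPLE_ITEM	499	/* placeholder item, validated against item_count */

	static int example_smem_roundtrip(void)
	{
		size_t size;
		void *ptr;
		int ret;

		/* Routed to the host partition, or (new) the global partition,
		 * or the plain global heap, in that order. */
		ret = qcom_smem_alloc(EXAMPLE_HOST, EXAMPLE_ITEM, 64);
		if (ret < 0 && ret != -EEXIST)
			return ret;

		/* Lookup now also walks the partition's cached entry list. */
		ptr = qcom_smem_get(EXAMPLE_HOST, EXAMPLE_ITEM, &size);
		return PTR_ERR_OR_ZERO(ptr);
	}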
include/linux/qcom_scm.h
@@ -43,6 +43,8 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
 extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
 extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
 extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
 #else
 static inline
 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
@@ -73,5 +75,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
 static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
 static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
 static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
+static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
 #endif
 #endif
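
A hedged usage note on the stubbed branch above: with CONFIG_QCOM_SCM disabled every helper returns -ENODEV, so callers need no #ifdefs. For example (the address is a placeholder, and qcom_scm_is_available() is assumed to be the probe helper declared earlier in this header):

	#include <linux/qcom_scm.h>

	static int example_optional_secure_read(unsigned int *val)
	{
		if (!qcom_scm_is_available())
			return -ENODEV;

		return qcom_scm_io_readl(0x01fd3000, val);
	}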