Commit e0bb3964 authored by Olof Johansson

Merge tag 'tc2-pm' of git://git.linaro.org/people/pawelmoll/linux into next/soc

From Pawel Moll and Nicolas Pitre:
- Fixes to the existing Vexpress DCSCB backend.

- Lorenzo's minimal SPC driver required by the TC2 MCPM backend.

- The MCPM backend enabling SMP secondary boot and CPU hotplug
  on the VExpress TC2 big.LITTLE platform.

- MCPM suspend method to the TC2 backend allowing basic CPU
  idle/suspend.  The cpuidle driver that hooks into this will be
  submitted separately.
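  A hypothetical sketch of how such a cpuidle driver could reach this
  suspend path through the MCPM layer is shown below; the bl_* names are
  illustrative placeholders, not part of this series, and only
  cpu_pm_enter()/cpu_pm_exit(), cpu_suspend(), mcpm_set_entry_vector()
  and mcpm_cpu_suspend() are existing kernel interfaces of this era:

	#include <linux/cpuidle.h>
	#include <linux/cpu_pm.h>
	#include <asm/cputype.h>
	#include <asm/mcpm.h>
	#include <asm/suspend.h>

	static int bl_powerdown_finisher(unsigned long arg)
	{
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

		/* Resume into cpu_resume via the MCPM entry point on wake-up */
		mcpm_set_entry_vector(cpu, cluster, cpu_resume);

		/* Ends up in the backend's suspend method; no return on success */
		mcpm_cpu_suspend(0);
		return 1;	/* reached only if the power down was aborted */
	}

	static int bl_enter_powerdown(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int idx)
	{
		cpu_pm_enter();				/* run CPU PM notifiers */
		cpu_suspend(0, bl_powerdown_finisher);	/* save/restore core context */
		cpu_pm_exit();
		return idx;
	}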

* tag 'tc2-pm' of git://git.linaro.org/people/pawelmoll/linux:
  ARM: vexpress/TC2: implement PM suspend method
  ARM: vexpress/TC2: basic PM support
  ARM: vexpress: Add SCC to V2P-CA15_A7's device tree
  ARM: vexpress/TC2: add Serial Power Controller (SPC) support
  ARM: vexpress/dcscb: fix cache disabling sequences
Signed-off-by: Olof Johansson <olof@lixom.net>
parents 3b2f64d0 e607b0f9
ARM Versatile Express Serial Configuration Controller
-----------------------------------------------------
Test chips for the ARM Versatile Express platform implement an SCC (Serial
Configuration Controller) interface, used to set initial conditions for the
test chip.

In some cases its registers are also mapped in the normal address space and
can be used to obtain runtime information about the chip internals (such as
silicon temperature sensors) and as an interface to other subsystems like
platform configuration control and power management.

Required properties:
- compatible value: "arm,vexpress-scc,<model>", "arm,vexpress-scc";
     where <model> is the full tile model name (as used
     in the tile's Technical Reference Manual),
     eg. for Coretile Express A15x2 A7x3 (V2P-CA15_A7):
     compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";

Optional properties:
- reg: when the SCC is memory mapped, physical address and size of the
     registers window
- interrupts: when the SCC can generate a system-level interrupt

Example:

	scc@7fff0000 {
		compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";
		reg = <0 0x7fff0000 0 0x1000>;
		interrupts = <0 95 4>;
	};
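
A kernel consumer of this binding typically looks the node up by its
compatible string and maps the optional register window. A minimal sketch,
mirroring the lookup done by the TC2 power management code further below
(the scc_map() name is illustrative):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/of.h>
	#include <linux/of_address.h>

	static void __iomem *scc_base;

	static int __init scc_map(void)
	{
		struct device_node *np;

		/* Match the tile-specific compatible value shown above */
		np = of_find_compatible_node(NULL, NULL,
					     "arm,vexpress-scc,v2p-ca15_a7");
		if (!np)
			return -ENODEV;

		scc_base = of_iomap(np, 0);	/* maps the "reg" window, if present */
		of_node_put(np);

		return scc_base ? 0 : -ENOMEM;
	}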
@@ -125,6 +125,12 @@ dma@7ff00000 {
 		clock-names = "apb_pclk";
 	};
 
+	scc@7fff0000 {
+		compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";
+		reg = <0 0x7fff0000 0 0x1000>;
+		interrupts = <0 95 4>;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <1 13 0xf08>,
@@ -66,4 +66,12 @@ config ARCH_VEXPRESS_DCSCB
 	  This is needed to provide CPU and cluster power management
 	  on RTSM implementing big.LITTLE.
 
+config ARCH_VEXPRESS_TC2_PM
+	bool "Versatile Express TC2 power management"
+	depends on MCPM
+	select ARM_CCI
+	help
+	  Support for CPU and cluster power management on Versatile Express
+	  with a TC2 (A15x2 A7x3) big.LITTLE core tile.
+
 endmenu
@@ -7,5 +7,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
 obj-y					:= v2m.o
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4)	+= ct-ca9x4.o
 obj-$(CONFIG_ARCH_VEXPRESS_DCSCB)	+= dcscb.o	dcscb_setup.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM)	+= tc2_pm.o spc.o
 obj-$(CONFIG_SMP)			+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
@@ -136,14 +136,29 @@ static void dcscb_power_down(void)
 		/*
 		 * Flush all cache levels for this cluster.
 		 *
-		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
-		 * a preliminary flush here for those CPUs.  At least, that's
-		 * the theory -- without the extra flush, Linux explodes on
-		 * RTSM (to be investigated).
+		 * To do so we do:
+		 * - Clear the SCTLR.C bit to prevent further cache allocations
+		 * - Flush the whole cache
+		 * - Clear the ACTLR "SMP" bit to disable local coherency
+		 *
+		 * Let's do it in the safest possible way i.e. with
+		 * no memory access within the following sequence
+		 * including to the stack.
 		 */
-		flush_cache_all();
-		set_cr(get_cr() & ~CR_C);
-		flush_cache_all();
+		asm volatile(
+		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
+		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
+		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
+		"isb	\n\t"
+		"bl	v7_flush_dcache_all \n\t"
+		"clrex	\n\t"
+		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
+		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
+		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
+		"isb	\n\t"
+		"dsb	"
+		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
+		      "r9","r10","r11","lr","memory");
 
 		/*
 		 * This is a harmless no-op.  On platforms with a real
@@ -152,9 +167,6 @@ static void dcscb_power_down(void)
 		 */
 		outer_flush_all();
 
-		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
-		set_auxcr(get_auxcr() & ~(1 << 6));
-
 		/*
 		 * Disable cluster-level coherency by masking
 		 * incoming snoops and DVM messages:
@@ -167,18 +179,22 @@ static void dcscb_power_down(void)
 		/*
 		 * Flush the local CPU cache.
-		 *
-		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
-		 * a preliminary flush here for those CPUs.  At least, that's
-		 * the theory -- without the extra flush, Linux explodes on
-		 * RTSM (to be investigated).
+		 * Let's do it in the safest possible way as above.
 		 */
-		flush_cache_louis();
-		set_cr(get_cr() & ~CR_C);
-		flush_cache_louis();
-
-		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
-		set_auxcr(get_auxcr() & ~(1 << 6));
+		asm volatile(
+		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
+		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
+		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
+		"isb	\n\t"
+		"bl	v7_flush_dcache_louis \n\t"
+		"clrex	\n\t"
+		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
+		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
+		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
+		"isb	\n\t"
+		"dsb	"
+		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
+		      "r9","r10","r11","lr","memory");
 	}
 
 	__mcpm_cpu_down(cpu, cluster);
/*
* Versatile Express Serial Power Controller (SPC) support
*
* Copyright (C) 2013 ARM Ltd.
*
* Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
* Achin Gupta <achin.gupta@arm.com>
* Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#define SPCLOG "vexpress-spc: "
/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK 0x24
#define WAKE_INT_RAW 0x28
#define WAKE_INT_STAT 0x2c
/* SPC power down registers */
#define A15_PWRDN_EN 0x30
#define A7_PWRDN_EN 0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78
/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK (0x3 << 10)
/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS 2
struct ve_spc_drvdata {
void __iomem *baseaddr;
/*
* A15s cluster identifier
* It corresponds to A15 processors MPIDR[15:8] bitfield
*/
u32 a15_clusid;
};
static struct ve_spc_drvdata *info;
static inline bool cluster_is_a15(u32 cluster)
{
return cluster == info->a15_clusid;
}
/**
* ve_spc_global_wakeup_irq()
*
* Function to set/clear global wakeup IRQs. Not protected by locking since
* it might be used in code paths where normal cacheable locks are not
* working. Locking must be provided by the caller to ensure atomicity.
*
* @set: if true, global wake-up IRQs are set, if false they are cleared
*/
void ve_spc_global_wakeup_irq(bool set)
{
u32 reg;
reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
if (set)
reg |= GBL_WAKEUP_INT_MSK;
else
reg &= ~GBL_WAKEUP_INT_MSK;
writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}
/**
* ve_spc_cpu_wakeup_irq()
*
* Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
* it might be used in code paths where normal cacheable locks are not
* working. Locking must be provided by the caller to ensure atomicity.
*
* @cluster: mpidr[15:8] bitfield describing cluster affinity level
* @cpu: mpidr[7:0] bitfield describing cpu affinity level
* @set: if true, wake-up IRQs are set, if false they are cleared
*/
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
u32 mask, reg;
if (cluster >= MAX_CLUSTERS)
return;
mask = 1 << cpu;
if (!cluster_is_a15(cluster))
mask <<= 4;
reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
if (set)
reg |= mask;
else
reg &= ~mask;
writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}
/**
* ve_spc_set_resume_addr() - set the jump address used for warm boot
*
* @cluster: mpidr[15:8] bitfield describing cluster affinity level
* @cpu: mpidr[7:0] bitfield describing cpu affinity level
* @addr: physical resume address
*/
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
void __iomem *baseaddr;
if (cluster >= MAX_CLUSTERS)
return;
if (cluster_is_a15(cluster))
baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
else
baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
writel_relaxed(addr, baseaddr);
}
/**
* ve_spc_powerdown()
*
* Function to enable/disable cluster powerdown. Not protected by locking
* since it might be used in code paths where normal cacheable locks are not
* working. Locking must be provided by the caller to ensure atomicity.
*
* @cluster: mpidr[15:8] bitfield describing cluster affinity level
* @enable: if true enables powerdown, if false disables it
*/
void ve_spc_powerdown(u32 cluster, bool enable)
{
u32 pwdrn_reg;
if (cluster >= MAX_CLUSTERS)
return;
pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
{
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_err(SPCLOG "unable to allocate mem\n");
return -ENOMEM;
}
info->baseaddr = baseaddr;
info->a15_clusid = a15_clusid;
/*
* Multi-cluster systems may need this data when non-coherent, during
* cluster power-up/power-down. Make sure driver info reaches main
* memory.
*/
sync_cache_w(info);
sync_cache_w(&info);
return 0;
}
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*/
#ifndef __SPC_H_
#define __SPC_H_
int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
void ve_spc_powerdown(u32 cluster, bool enable);
#endif
/*
* arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
*
* Created by: Nicolas Pitre, October 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* Some portions of this file were originally written by Achin Gupta
* Copyright: (C) 2012 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <linux/arm-cci.h>
#include "spc.h"
/* SCC conf registers */
#define A15_CONF 0x400
#define A7_CONF 0x500
#define SYS_INFO 0x700
#define SPC_BASE 0xb00
/*
* We can't use regular spinlocks. In the switcher case, it is possible
* for an outbound CPU to call power_down() after its inbound counterpart
* is already live using the same logical CPU number which trips lockdep
* debugging.
*/
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#define TC2_CLUSTERS 2
#define TC2_MAX_CPUS_PER_CLUSTER 3
static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
#define tc2_cluster_unused(cluster) \
(!tc2_pm_use_count[0][cluster] && \
!tc2_pm_use_count[1][cluster] && \
!tc2_pm_use_count[2][cluster])
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
return -EINVAL;
/*
* Since this is called with IRQs enabled, and no arch_spin_lock_irq
* variant exists, we need to disable IRQs manually here.
*/
local_irq_disable();
arch_spin_lock(&tc2_pm_lock);
if (tc2_cluster_unused(cluster))
ve_spc_powerdown(cluster, false);
tc2_pm_use_count[cpu][cluster]++;
if (tc2_pm_use_count[cpu][cluster] == 1) {
ve_spc_set_resume_addr(cluster, cpu,
virt_to_phys(mcpm_entry_point));
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
} else if (tc2_pm_use_count[cpu][cluster] != 2) {
/*
* The only possible values are:
* 0 = CPU down
* 1 = CPU (still) up
* 2 = CPU requested to be up before it had a chance
* to actually make itself down.
* Any other value is a bug.
*/
BUG();
}
arch_spin_unlock(&tc2_pm_lock);
local_irq_enable();
return 0;
}
static void tc2_pm_down(u64 residency)
{
unsigned int mpidr, cpu, cluster;
bool last_man = false, skip_wfi = false;
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
__mcpm_cpu_going_down(cpu, cluster);
arch_spin_lock(&tc2_pm_lock);
BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
tc2_pm_use_count[cpu][cluster]--;
if (tc2_pm_use_count[cpu][cluster] == 0) {
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
if (tc2_cluster_unused(cluster)) {
ve_spc_powerdown(cluster, true);
ve_spc_global_wakeup_irq(true);
last_man = true;
}
} else if (tc2_pm_use_count[cpu][cluster] == 1) {
/*
* A power_up request went ahead of us.
* Even if we do not want to shut this CPU down,
* the caller expects a certain state as if the WFI
* was aborted. So let's continue with cache cleaning.
*/
skip_wfi = true;
} else
BUG();
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&tc2_pm_lock);
if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
/*
* On the Cortex-A15 we need to disable
* L2 prefetching before flushing the cache.
*/
asm volatile(
"mcr p15, 1, %0, c15, c0, 3 \n\t"
"isb \n\t"
"dsb "
: : "r" (0x400) );
}
/*
* We need to disable and flush the whole (L1 and L2) cache.
* Let's do it in the safest possible way i.e. with
* no memory access within the following sequence
* including the stack.
*/
asm volatile(
"mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
"bic r0, r0, #"__stringify(CR_C)" \n\t"
"mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
"isb \n\t"
"bl v7_flush_dcache_all \n\t"
"clrex \n\t"
"mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
"mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
"isb \n\t"
"dsb "
: : : "r0","r1","r2","r3","r4","r5","r6","r7",
"r9","r10","r11","lr","memory");
cci_disable_port_by_cpu(mpidr);
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else {
/*
* If last man then undo any setup done previously.
*/
if (last_man) {
ve_spc_powerdown(cluster, false);
ve_spc_global_wakeup_irq(false);
}
arch_spin_unlock(&tc2_pm_lock);
/*
* We need to disable and flush only the L1 cache.
* Let's do it in the safest possible way as above.
*/
asm volatile(
"mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
"bic r0, r0, #"__stringify(CR_C)" \n\t"
"mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
"isb \n\t"
"bl v7_flush_dcache_louis \n\t"
"clrex \n\t"
"mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
"mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
"isb \n\t"
"dsb "
: : : "r0","r1","r2","r3","r4","r5","r6","r7",
"r9","r10","r11","lr","memory");
}
__mcpm_cpu_down(cpu, cluster);
/* Now we are prepared for power-down, do it: */
if (!skip_wfi)
wfi();
/* Not dead at this point? Let our caller cope. */
}
static void tc2_pm_power_down(void)
{
tc2_pm_down(0);
}
static void tc2_pm_suspend(u64 residency)
{
unsigned int mpidr, cpu, cluster;
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
tc2_pm_down(residency);
}
static void tc2_pm_powered_up(void)
{
unsigned int mpidr, cpu, cluster;
unsigned long flags;
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
local_irq_save(flags);
arch_spin_lock(&tc2_pm_lock);
if (tc2_cluster_unused(cluster)) {
ve_spc_powerdown(cluster, false);
ve_spc_global_wakeup_irq(false);
}
if (!tc2_pm_use_count[cpu][cluster])
tc2_pm_use_count[cpu][cluster] = 1;
ve_spc_cpu_wakeup_irq(cluster, cpu, false);
ve_spc_set_resume_addr(cluster, cpu, 0);
arch_spin_unlock(&tc2_pm_lock);
local_irq_restore(flags);
}
static const struct mcpm_platform_ops tc2_pm_power_ops = {
.power_up = tc2_pm_power_up,
.power_down = tc2_pm_power_down,
.suspend = tc2_pm_suspend,
.powered_up = tc2_pm_powered_up,
};
static bool __init tc2_pm_usage_count_init(void)
{
unsigned int mpidr, cpu, cluster;
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
pr_err("%s: boot CPU is out of bound!\n", __func__);
return false;
}
tc2_pm_use_count[cpu][cluster] = 1;
return true;
}
/*
* Enable cluster-level coherency, in preparation for turning on the MMU.
*/
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
asm volatile (" \n"
" cmp r0, #1 \n"
" bxne lr \n"
" b cci_enable_port_for_self ");
}
static int __init tc2_pm_init(void)
{
int ret;
void __iomem *scc;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
/*
* The power management-related features are hidden behind
* SCC registers. We need to extract runtime information like
* cluster ids and number of CPUs really available in clusters.
*/
np = of_find_compatible_node(NULL, NULL,
"arm,vexpress-scc,v2p-ca15_a7");
scc = of_iomap(np, 0);
if (!scc)
return -ENODEV;
a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
return -EINVAL;
sys_info = readl_relaxed(scc + SYS_INFO);
tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
/*
* A subset of the SCC registers is also used to communicate
* with the SPC (power controller). We need to be able to
* drive it very early in the boot process to power up
* processors, so we initialize the SPC driver here.
*/
ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
if (ret)
return ret;
if (!cci_probed())
return -ENODEV;
if (!tc2_pm_usage_count_init())
return -EINVAL;
ret = mcpm_platform_register(&tc2_pm_power_ops);
if (!ret) {
mcpm_sync_init(tc2_pm_power_up_setup);
pr_info("TC2 power management initialized\n");
}
return ret;
}
early_initcall(tc2_pm_init);