Commit 18b4788b authored by Arnd Bergmann

Merge tag 'omap-for-v4.17/am-pm-signed' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/soc

Pull "Add am335x and am437x PM code for v4.17" from Tony Lindgren:

This series of changes from Dave Gerlach adds the PM-related
code needed to allow low-power suspend states. The code consists
of SoC-specific assembly code and a related PM driver.

* tag 'omap-for-v4.17/am-pm-signed' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  soc: ti: Add pm33xx driver for basic suspend support
  ARM: OMAP2+: pm33xx-core: Add platform code needed for PM
  ARM: OMAP2+: Introduce low-level suspend code for AM43XX
  ARM: OMAP2+: Introduce low-level suspend code for AM33XX
parents 6f566c4f afe761f8
@@ -13,6 +13,13 @@ Required properties:
Optional properties:
- sram: Phandle to the ocmcram node
am335x and am437x only:
- pm-sram: Phandles to ocmcram nodes to be used for power management.
The first should be of type 'protect-exec', used by the driver to copy
and run the PM functions; the second should be a regular pool used as
the data region for that code. See
Documentation/devicetree/bindings/sram/sram.txt for more details.
Examples:
- For an OMAP5 SMP system:
@@ -36,3 +43,12 @@ mpu {
compatible = "ti,omap3-mpu";
ti,hwmods = "mpu";
};
- For an AM335x system:
mpu {
compatible = "ti,omap3-mpu";
ti,hwmods = "mpu";
pm-sram = <&pm_sram_code
&pm_sram_data>;
};
@@ -72,6 +72,7 @@ config SOC_AM43XX
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select OMAP_INTERCONNECT
select ARM_CPU_SUSPEND if PM
config SOC_DRA7XX
bool "TI DRA7XX"
@@ -88,6 +88,8 @@ omap-4-5-pm-common += pm44xx.o
obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-pm-common)
obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-pm-common)
obj-$(CONFIG_SOC_DRA7XX) += $(omap-4-5-pm-common)
obj-$(CONFIG_SOC_AM33XX) += pm33xx-core.o sleep33xx.o
obj-$(CONFIG_SOC_AM43XX) += pm33xx-core.o sleep43xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_POWER_AVS_OMAP) += sr_device.o
@@ -95,6 +97,8 @@ obj-$(CONFIG_POWER_AVS_OMAP_CLASS3) += smartreflex-class3.o
AFLAGS_sleep24xx.o :=-Wa,-march=armv6
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_sleep33xx.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_sleep43xx.o :=-Wa,-march=armv7-a$(plus_sec)
endif
@@ -232,3 +236,15 @@ obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
obj-y += omap_phy_internal.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
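# Generate ti-pm-asm-offsets.h from pm-asm-offsets.c (same filechk/offsets
# mechanism as the generic asm-offsets.h) so that sleep33xx.S and sleep43xx.S
# can use the struct offsets defined there.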
arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
$(call if_changed_dep,cc_s_c)
include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
$(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
# For rule to generate ti-emif-asm-offsets.h dependency
include drivers/memory/Makefile.asm-offsets
arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
@@ -77,6 +77,13 @@ static inline int omap4_pm_init_early(void)
}
#endif
#if defined(CONFIG_PM) && (defined(CONFIG_SOC_AM33XX) || \
defined(CONFIG_SOC_AM43XX))
void amx3_common_pm_init(void);
#else
static inline void amx3_common_pm_init(void) { }
#endif
extern void omap2_init_common_infrastructure(void);
extern void omap_init_time(void);
@@ -622,6 +622,7 @@ void __init am33xx_init_early(void)
void __init am33xx_init_late(void)
{
omap_common_late_init();
amx3_common_pm_init();
}
#endif
@@ -646,6 +647,7 @@ void __init am43xx_init_late(void)
{
omap_common_late_init();
omap2_clk_enable_autoidle_all();
amx3_common_pm_init();
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* TI AM33XX and AM43XX PM Assembly Offsets
*
* Copyright (C) 2017-2018 Texas Instruments Inc.
*/
#include <linux/kbuild.h>
#include <linux/platform_data/pm33xx.h>
int main(void)
{
DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
offsetof(struct am33xx_pm_sram_data, wfi_flags));
DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
offsetof(struct am33xx_pm_sram_data, l2_aux_ctrl_val));
DEFINE(AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET,
offsetof(struct am33xx_pm_sram_data, l2_prefetch_ctrl_val));
DEFINE(AMX3_PM_SRAM_DATA_SIZE, sizeof(struct am33xx_pm_sram_data));
BLANK();
DEFINE(AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET,
offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_virt));
DEFINE(AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET,
offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_phys));
DEFINE(AMX3_PM_RO_SRAM_DATA_SIZE,
sizeof(struct am33xx_pm_ro_sram_data));
return 0;
}
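For reference, kbuild turns the DEFINE() entries above into plain preprocessor constants in include/generated/ti-pm-asm-offsets.h, which the sleep33xx/sleep43xx assembly includes. A minimal sketch of what the generated header looks like; the numeric values are illustrative and depend on the actual struct layout:
/* Illustrative excerpt of the auto-generated ti-pm-asm-offsets.h */
#ifndef __TI_PM_ASM_OFFSETS_H__
#define __TI_PM_ASM_OFFSETS_H__
#define AMX3_PM_WFI_FLAGS_OFFSET 0 /* offsetof(struct am33xx_pm_sram_data, wfi_flags) */
#define AMX3_PM_L2_AUX_CTRL_VAL_OFFSET 4 /* offsetof(struct am33xx_pm_sram_data, l2_aux_ctrl_val) */
#define AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET 8 /* offsetof(struct am33xx_pm_sram_data, l2_prefetch_ctrl_val) */
#define AMX3_PM_SRAM_DATA_SIZE 16 /* sizeof(struct am33xx_pm_sram_data) */
#define AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET 0 /* offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_virt) */
#define AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET 4 /* offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_phys) */
#define AMX3_PM_RO_SRAM_DATA_SIZE 8 /* sizeof(struct am33xx_pm_ro_sram_data) */
#endif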
@@ -81,6 +81,9 @@ extern unsigned int omap3_do_wfi_sz;
/* ... and its pointer from SRAM after copy */
extern void (*omap3_do_wfi_sram)(void);
extern struct am33xx_pm_sram_addr am33xx_pm_sram;
extern struct am33xx_pm_sram_addr am43xx_pm_sram;
extern void omap3_save_scratchpad_contents(void);
#define PM_RTA_ERRATUM_i608 (1 << 0)
// SPDX-License-Identifier: GPL-2.0
/*
* AM33XX Arch Power Management Routines
*
* Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach
*/
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <linux/errno.h>
#include <linux/platform_data/pm33xx.h>
#include "cm33xx.h"
#include "common.h"
#include "control.h"
#include "clockdomain.h"
#include "iomap.h"
#include "omap_hwmod.h"
#include "pm.h"
#include "powerdomain.h"
#include "prm33xx.h"
#include "soc.h"
#include "sram.h"
static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;
static int __init am43xx_map_scu(void)
{
scu_base = ioremap(scu_a9_get_base(), SZ_256);
if (!scu_base)
return -ENOMEM;
return 0;
}
static int amx3_common_init(void)
{
gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
per_pwrdm = pwrdm_lookup("per_pwrdm");
mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm))
return -ENODEV;
(void)clkdm_for_each(omap_pm_clkdms_setup, NULL);
/* CEFUSE domain can be turned off post bootup */
cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
if (cefuse_pwrdm)
omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
else
pr_err("PM: Failed to get cefuse_pwrdm\n");
return 0;
}
static int am33xx_suspend_init(void)
{
int ret;
gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");
if (!gfx_l4ls_clkdm) {
pr_err("PM: Cannot lookup gfx_l4ls_clkdm clockdomains\n");
return -ENODEV;
}
ret = amx3_common_init();
return ret;
}
static int am43xx_suspend_init(void)
{
int ret = 0;
ret = am43xx_map_scu();
if (ret) {
pr_err("PM: Could not ioremap SCU\n");
return ret;
}
ret = amx3_common_init();
return ret;
}
static void amx3_pre_suspend_common(void)
{
omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF);
}
static void amx3_post_suspend_common(void)
{
int status;
/*
* Because gfx_pwrdm is the only one under MPU control,
* check and report its transition status
*/
status = pwrdm_read_pwrst(gfx_pwrdm);
if (status != PWRDM_POWER_OFF)
pr_err("PM: GFX domain did not transition: %x\n", status);
}
static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long))
{
int ret = 0;
amx3_pre_suspend_common();
ret = cpu_suspend(0, fn);
amx3_post_suspend_common();
/*
* BUG: GFX_L4LS clock domain needs to be woken up to
* ensure that the L4LS clock domain does not get stuck in
* transition. If that happens, the L3 module does not get
* disabled, which in turn causes the PER power domain
* transition to fail
*/
clkdm_wakeup(gfx_l4ls_clkdm);
clkdm_sleep(gfx_l4ls_clkdm);
return ret;
}
static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long))
{
int ret = 0;
amx3_pre_suspend_common();
scu_power_mode(scu_base, SCU_PM_POWEROFF);
ret = cpu_suspend(0, fn);
scu_power_mode(scu_base, SCU_PM_NORMAL);
amx3_post_suspend_common();
return ret;
}
static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
{
if (soc_is_am33xx())
return &am33xx_pm_sram;
else if (soc_is_am437x())
return &am43xx_pm_sram;
else
return NULL;
}
static struct am33xx_pm_platform_data am33xx_ops = {
.init = am33xx_suspend_init,
.soc_suspend = am33xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
};
static struct am33xx_pm_platform_data am43xx_ops = {
.init = am43xx_suspend_init,
.soc_suspend = am43xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
};
static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
{
if (soc_is_am33xx())
return &am33xx_ops;
else if (soc_is_am437x())
return &am43xx_ops;
else
return NULL;
}
void __init amx3_common_pm_init(void)
{
struct am33xx_pm_platform_data *pdata;
struct platform_device_info devinfo;
pdata = am33xx_pm_get_pdata();
memset(&devinfo, 0, sizeof(devinfo));
devinfo.name = "pm33xx";
devinfo.data = pdata;
devinfo.size_data = sizeof(*pdata);
devinfo.id = -1;
platform_device_register_full(&devinfo);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM33XX SoCs
*
* Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-emif-asm-offsets.h>
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "iomap.h"
#include "cm33xx.h"
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
.arm
.align 3
ENTRY(am33xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/*
* Flush all data from the L1 and L2 data cache before disabling
* SCTLR.C bit.
*/
ldr r1, kernel_flush
blx r1
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
* strongly ordered and would not hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
* Invalidate L1 and L2 data cache.
*/
ldr r1, kernel_flush
blx r1
adr r9, am33xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
/* Disable EMIF */
ldr r1, virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
ldr r1, virt_emif_clkctrl
wait_emif_disable:
ldr r2, [r1]
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
/*
* For the MPU WFI to be registered as an interrupt
* to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
* to DISABLED
*/
ldr r1, virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
/*
* Execute an ISB instruction to ensure that all of the
* CP15 register changes have been committed.
*/
isb
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
* have completed.
*/
dsb
dmb
/*
* Execute a WFI instruction and wait until the
* STANDBYWFI output is asserted to indicate that the
* CPU is in idle and low power state. CPU can speculatively
* prefetch the instructions so add NOPs after WFI. Thirteen
* NOPs as per Cortex-A8 pipeline.
*/
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
/* Re-enable EMIF */
ldr r1, virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
/*
* Set SCTLR.C bit to allow data cache allocation
*/
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am33xx_do_wfi)
.align
ENTRY(am33xx_resume_offset)
.word . - am33xx_do_wfi
ENTRY(am33xx_resume_from_deep_sleep)
/* Re-enable EMIF */
ldr r0, phys_emif_clkctrl
mov r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r1, [r0]
wait_emif_enable1:
ldr r2, [r0]
cmp r1, r2
bne wait_emif_enable1
adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
blx r1
resume_to_ddr:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0
ldr pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)
/*
* Local variables
*/
.align
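/*
 * Physical address of the generic cpu_resume() entry point: DDR starts
 * at physical address 0x80000000 on these SoCs, and the resume path
 * runs with the MMU off, so the kernel virtual address is translated
 * by hand here.
 */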
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000
kernel_flush:
.word v7_flush_dcache_all
virt_mpu_clkctrl:
.word AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
.word AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
.word (AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)
.align 3
/* DDR related defines */
am33xx_emif_sram_table:
.space EMIF_PM_FUNCTIONS_SIZE
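/*
 * Table of SRAM-resident symbols, exported to the pm33xx driver as
 * struct am33xx_pm_sram_addr (see include/linux/platform_data/pm33xx.h).
 */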
ENTRY(am33xx_pm_sram)
.word am33xx_do_wfi
.word am33xx_do_wfi_sz
.word am33xx_resume_offset
.word am33xx_emif_sram_table
.word am33xx_pm_ro_sram_data
.align 3
ENTRY(am33xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
ENTRY(am33xx_do_wfi_sz)
.word . - am33xx_do_wfi
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM43XX SoCs
*
* Copyright (C) 2013-2018 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-emif-asm-offsets.h>
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/memory.h>
#include "cm33xx.h"
#include "common.h"
#include "iomap.h"
#include "omap-secure.h"
#include "omap44xx.h"
#include "prm33xx.h"
#include "prcm43xx.h"
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
#define AM43XX_EMIF_POWEROFF_ENABLE 0x1
#define AM43XX_EMIF_POWEROFF_DISABLE 0x0
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP 0x1
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO 0x3
#define AM43XX_CM_BASE 0x44DF0000
#define AM43XX_CM_REGADDR(inst, reg) \
AM33XX_L4_WK_IO_ADDRESS(AM43XX_CM_BASE + (inst) + (reg))
#define AM43XX_CM_MPU_CLKSTCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CDOFFS)
#define AM43XX_CM_MPU_MPU_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET)
#define AM43XX_CM_PER_EMIF_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_PER_INST, \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#define AM43XX_PRM_EMIF_CTRL_OFFSET 0x0030
.arm
.align 3
ENTRY(am43xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/* Retrieve l2 cache virt address BEFORE we shut off EMIF */
ldr r1, get_l2cache_base
blx r1
mov r8, r0
/*
* Flush all data from the L1 and L2 data cache before disabling
* SCTLR.C bit.
*/
ldr r1, kernel_flush
blx r1
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
* strongly ordered and would not hit the cache.
*/
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
dsb
/*
* Invalidate L1 and L2 data cache.
*/
ldr r1, kernel_flush
blx r1
#ifdef CONFIG_CACHE_L2X0
/*
* Clean and invalidate the L2 cache.
*/
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
mov r0, r8
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
mov r2, r0
ldr r0, [r2, #L2X0_AUX_CTRL]
str r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r0, [r2, #L310_PREFETCH_CTRL]
str r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r0, l2_val
str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync:
mov r0, r8
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
adr r9, am43xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
/* Disable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
wait_emif_disable:
ldr r2, [r1]
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
/*
* For the MPU WFI to be registered as an interrupt
* to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
* to DISABLED
*/
ldr r1, am43xx_virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
/*
* Put MPU CLKDM to SW_SLEEP
*/
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP
str r2, [r1]
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
* have completed.
*/
dsb
dmb
/*
* Execute a WFI instruction and wait until the
* STANDBYWFI output is asserted to indicate that the
* CPU is in idle and low power state. CPU can speculatively
* prefetch the instructions so add NOPs after WFI. Sixteen
* NOPs as per Cortex-A9 pipeline.
*/
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, am43xx_virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
/* Re-enable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable
/*
* Set SCTLR.C bit to allow data cache allocation
*/
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am43xx_do_wfi)
.align
ENTRY(am43xx_resume_offset)
.word . - am43xx_do_wfi
ENTRY(am43xx_resume_from_deep_sleep)
/* Set MPU CLKSTCTRL to HW AUTO so that CPUidle works properly */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* For AM43xx, use EMIF power down until context is restored */
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_ENABLE
str r1, [r2, #0x0]
/* Re-enable EMIF */
ldr r1, am43xx_phys_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable1:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable1
adr r9, am43xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
blx r1
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_DISABLE
str r1, [r2, #0x0]
#ifdef CONFIG_CACHE_L2X0
ldr r2, l2_cache_base
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET]
ldr r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r12, l2_smc1
dsb
smc #0
dsb
set_aux_ctrl:
ldr r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r12, l2_smc2
dsb
smc #0
dsb
/* L2 invalidate on resume */
ldr r0, l2_val
ldr r2, l2_cache_base
str r0, [r2, #L2X0_INV_WAY]
wait2:
ldr r0, [r2, #L2X0_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait2
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync2:
ldr r2, l2_cache_base
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync2:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync2
mov r0, #0x1
ldr r12, l2_smc3
dsb
smc #0
dsb
#endif
skip_l2en:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0
ldr pc, resume_addr
ENDPROC(am43xx_resume_from_deep_sleep)
/*
* Local variables
*/
.align
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000
get_l2cache_base:
.word omap4_get_l2cache_base
kernel_flush:
.word v7_flush_dcache_all
ddr_start:
.word PAGE_OFFSET
am43xx_phys_emif_poweroff:
.word (AM43XX_CM_BASE + AM43XX_PRM_DEVICE_INST + \
AM43XX_PRM_EMIF_CTRL_OFFSET)
am43xx_virt_mpu_clkstctrl:
.word (AM43XX_CM_MPU_CLKSTCTRL)
am43xx_virt_mpu_clkctrl:
.word (AM43XX_CM_MPU_MPU_CLKCTRL)
am43xx_virt_emif_clkctrl:
.word (AM43XX_CM_PER_EMIF_CLKCTRL)
am43xx_phys_emif_clkctrl:
.word (AM43XX_CM_BASE + AM43XX_CM_PER_INST + \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
/* L2 cache related defines for AM437x */
l2_cache_base:
.word OMAP44XX_L2CACHE_BASE
l2_smc1:
.word OMAP4_MON_L2X0_PREFETCH_INDEX
l2_smc2:
.word OMAP4_MON_L2X0_AUXCTRL_INDEX
l2_smc3:
.word OMAP4_MON_L2X0_CTRL_INDEX
l2_val:
.word 0xffff
.align 3
/* DDR related defines */
ENTRY(am43xx_emif_sram_table)
.space EMIF_PM_FUNCTIONS_SIZE
ENTRY(am43xx_pm_sram)
.word am43xx_do_wfi
.word am43xx_do_wfi_sz
.word am43xx_resume_offset
.word am43xx_emif_sram_table
.word am43xx_pm_ro_sram_data
.align 3
ENTRY(am43xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
ENTRY(am43xx_do_wfi_sz)
.word . - am43xx_do_wfi
@@ -28,6 +28,15 @@ config KEYSTONE_NAVIGATOR_DMA
If unsure, say N.
config AMX3_PM
tristate "AMx3 Power Management"
depends on SOC_AM33XX || SOC_AM43XX
depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
help
Enable power management on AM335x and AM437x. Required for suspend to mem
and standby states on both AM335x and AM437x platforms and for deeper cpuidle
c-states on AM335x.
config WKUP_M3_IPC
tristate "TI AMx3 Wkup-M3 IPC Driver"
depends on WKUP_M3_RPROC
@@ -5,5 +5,6 @@
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
obj-$(CONFIG_AMX3_PM) += pm33xx.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
// SPDX-License-Identifier: GPL-2.0
/*
* AM33XX Power Management Routines
*
* Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
* Vaibhav Bedia, Dave Gerlach
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sram.h>
#include <linux/suspend.h>
#include <linux/ti-emif-sram.h>
#include <linux/wkup_m3_ipc.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>
#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
(unsigned long)pm_sram->do_wfi)
static int (*am33xx_do_wfi_sram)(unsigned long unused);
static phys_addr_t am33xx_do_wfi_sram_phys;
static struct gen_pool *sram_pool, *sram_pool_data;
static unsigned long ocmcram_location, ocmcram_location_data;
static struct am33xx_pm_platform_data *pm_ops;
static struct am33xx_pm_sram_addr *pm_sram;
static struct device *pm33xx_dev;
static struct wkup_m3_ipc *m3_ipc;
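/*
 * Translate the address of a symbol inside the do_wfi code block, as
 * linked into the kernel, into the corresponding address within the
 * copy of that code now running from SRAM.
 */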
static u32 sram_suspend_address(unsigned long addr)
{
return ((unsigned long)am33xx_do_wfi_sram +
AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
}
#ifdef CONFIG_SUSPEND
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
ret = pm_ops->soc_suspend((unsigned long)suspend_state,
am33xx_do_wfi_sram);
if (ret) {
dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
} else {
i = m3_ipc->ops->request_pm_status(m3_ipc);
switch (i) {
case 0:
dev_info(pm33xx_dev,
"PM: Successfully put all powerdomains to target state\n");
break;
case 1:
dev_err(pm33xx_dev,
"PM: Could not transition all powerdomains to target state\n");
ret = -1;
break;
default:
dev_err(pm33xx_dev,
"PM: CM3 returned unknown result = %d\n", i);
ret = -1;
}
}
return ret;
}
static int am33xx_pm_enter(suspend_state_t suspend_state)
{
int ret = 0;
switch (suspend_state) {
case PM_SUSPEND_MEM:
case PM_SUSPEND_STANDBY:
ret = am33xx_pm_suspend(suspend_state);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int am33xx_pm_begin(suspend_state_t state)
{
int ret = -EINVAL;
switch (state) {
case PM_SUSPEND_MEM:
ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
break;
case PM_SUSPEND_STANDBY:
ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
break;
}
return ret;
}
static void am33xx_pm_end(void)
{
m3_ipc->ops->finish_low_power(m3_ipc);
}
static int am33xx_pm_valid(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
return 1;
default:
return 0;
}
}
static const struct platform_suspend_ops am33xx_pm_ops = {
.begin = am33xx_pm_begin,
.end = am33xx_pm_end,
.enter = am33xx_pm_enter,
.valid = am33xx_pm_valid,
};
#endif /* CONFIG_SUSPEND */
static void am33xx_pm_set_ipc_ops(void)
{
u32 resume_address;
int temp;
temp = ti_emif_get_mem_type();
if (temp < 0) {
dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
return;
}
m3_ipc->ops->set_mem_type(m3_ipc, temp);
/* Physical resume address to be used by ROM code */
resume_address = am33xx_do_wfi_sram_phys +
*pm_sram->resume_offset + 0x4;
m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
}
static void am33xx_pm_free_sram(void)
{
gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
gen_pool_free(sram_pool_data, ocmcram_location_data,
sizeof(struct am33xx_pm_ro_sram_data));
}
/*
* Push the minimal suspend-resume code to SRAM
*/
static int am33xx_pm_alloc_sram(void)
{
struct device_node *np;
int ret = 0;
np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
if (!np) {
dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
__func__);
return -ENODEV;
}
}
sram_pool = of_gen_pool_get(np, "pm-sram", 0);
if (!sram_pool) {
dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
__func__);
ret = -ENODEV;
goto mpu_put_node;
}
sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
if (!sram_pool_data) {
dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
__func__);
ret = -ENODEV;
goto mpu_put_node;
}
ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
if (!ocmcram_location) {
dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
__func__);
ret = -ENOMEM;
goto mpu_put_node;
}
ocmcram_location_data = gen_pool_alloc(sram_pool_data,
sizeof(struct emif_regs_amx3));
if (!ocmcram_location_data) {
dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
ret = -ENOMEM;
}
mpu_put_node:
of_node_put(np);
return ret;
}
static int am33xx_push_sram_idle(void)
{
struct am33xx_pm_ro_sram_data ro_sram_data;
int ret;
u32 table_addr, ro_data_addr;
void *copy_addr;
ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
ro_sram_data.amx3_pm_sram_data_phys =
gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
/* Save physical address to calculate resume offset during pm init */
am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
ocmcram_location);
am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
pm_sram->do_wfi,
*pm_sram->do_wfi_sz);
if (!am33xx_do_wfi_sram) {
dev_err(pm33xx_dev,
"PM: %s: am33xx_do_wfi copy to sram failed\n",
__func__);
return -ENODEV;
}
table_addr =
sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
if (ret) {
dev_dbg(pm33xx_dev,
"PM: %s: EMIF function copy failed\n", __func__);
return -EPROBE_DEFER;
}
ro_data_addr =
sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
&ro_sram_data,
sizeof(ro_sram_data));
if (!copy_addr) {
dev_err(pm33xx_dev,
"PM: %s: ro_sram_data copy to sram failed\n",
__func__);
return -ENODEV;
}
return 0;
}
static int am33xx_pm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
if (!of_machine_is_compatible("ti,am33xx") &&
!of_machine_is_compatible("ti,am43"))
return -ENODEV;
pm_ops = dev->platform_data;
if (!pm_ops) {
dev_err(dev, "PM: Cannot get core PM ops!\n");
return -ENODEV;
}
pm_sram = pm_ops->get_sram_addrs();
if (!pm_sram) {
dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
return -ENODEV;
}
pm33xx_dev = dev;
ret = am33xx_pm_alloc_sram();
if (ret)
return ret;
ret = am33xx_push_sram_idle();
if (ret)
goto err_free_sram;
m3_ipc = wkup_m3_ipc_get();
if (!m3_ipc) {
dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
ret = -EPROBE_DEFER;
goto err_free_sram;
}
am33xx_pm_set_ipc_ops();
#ifdef CONFIG_SUSPEND
suspend_set_ops(&am33xx_pm_ops);
#endif /* CONFIG_SUSPEND */
ret = pm_ops->init();
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
goto err_put_wkup_m3_ipc;
}
return 0;
err_put_wkup_m3_ipc:
wkup_m3_ipc_put(m3_ipc);
err_free_sram:
am33xx_pm_free_sram();
pm33xx_dev = NULL;
return ret;
}
static int am33xx_pm_remove(struct platform_device *pdev)
{
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
return 0;
}
static struct platform_driver am33xx_pm_driver = {
.driver = {
.name = "pm33xx",
},
.probe = am33xx_pm_probe,
.remove = am33xx_pm_remove,
};
module_platform_driver(am33xx_pm_driver);
MODULE_ALIAS("platform:pm33xx");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("am33xx power management driver");
/* SPDX-License-Identifier: GPL-2.0 */
/*
* TI pm33xx platform data
*
* Copyright (C) 2016-2018 Texas Instruments, Inc.
* Dave Gerlach <d-gerlach@ti.com>
*/
#ifndef _LINUX_PLATFORM_DATA_PM33XX_H
#define _LINUX_PLATFORM_DATA_PM33XX_H
#include <linux/kbuild.h>
#include <linux/types.h>
#ifndef __ASSEMBLER__
struct am33xx_pm_sram_addr {
void (*do_wfi)(void);
unsigned long *do_wfi_sz;
unsigned long *resume_offset;
unsigned long *emif_sram_table;
unsigned long *ro_sram_data;
};
struct am33xx_pm_platform_data {
int (*init)(void);
int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long));
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
};
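/*
 * The two structures below are mirrored into SRAM and accessed from the
 * low-level suspend assembly via the generated ti-pm-asm-offsets.h
 * constants; __packed/__aligned(8) keeps their layout fixed.
 */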
struct am33xx_pm_sram_data {
u32 wfi_flags;
u32 l2_aux_ctrl_val;
u32 l2_prefetch_ctrl_val;
} __packed __aligned(8);
struct am33xx_pm_ro_sram_data {
u32 amx3_pm_sram_data_virt;
u32 amx3_pm_sram_data_phys;
} __packed __aligned(8);
#endif /* __ASSEMBLER__ */
#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */