Commit df355101 authored by Amit Daniel Kachhap, committed by Catalin Marinas

arm64: ptrauth: Add bootup/runtime flags for __cpu_setup

This patch allows __cpu_setup to be invoked with one of these flags,
ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
This is required as some cpufeatures need different handling during
different scenarios.

The input parameter in x0 is preserved till the end to be used inside
this function.

There should be no functional change with this patch; it is useful
for the subsequent ptrauth patch, which utilizes it. Some upcoming
arm cpufeatures can also utilize these flags.
Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent be129842
...@@ -23,6 +23,14 @@ ...@@ -23,6 +23,14 @@
#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT) #define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT) #define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)
/* Possible options for __cpu_setup */
/* Option to setup primary cpu */
#define ARM64_CPU_BOOT_PRIMARY (1)
/* Option to setup secondary cpus */
#define ARM64_CPU_BOOT_SECONDARY (2)
/* Option to setup cpus for different cpu run time services */
#define ARM64_CPU_RUNTIME (3)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/percpu.h> #include <asm/percpu.h>
......
...@@ -118,6 +118,7 @@ ENTRY(stext) ...@@ -118,6 +118,7 @@ ENTRY(stext)
* On return, the CPU will be ready for the MMU to be turned on and * On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set. * the TCR will have been set.
*/ */
mov x0, #ARM64_CPU_BOOT_PRIMARY
bl __cpu_setup // initialise processor bl __cpu_setup // initialise processor
b __primary_switch b __primary_switch
ENDPROC(stext) ENDPROC(stext)
...@@ -712,6 +713,7 @@ secondary_startup: ...@@ -712,6 +713,7 @@ secondary_startup:
* Common entry point for secondary CPUs. * Common entry point for secondary CPUs.
*/ */
bl __cpu_secondary_check52bitva bl __cpu_secondary_check52bitva
mov x0, #ARM64_CPU_BOOT_SECONDARY
bl __cpu_setup // initialise processor bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir adrp x1, swapper_pg_dir
bl __enable_mmu bl __enable_mmu
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/smp.h>
.text .text
/* /*
...@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter) ...@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "awx"
ENTRY(cpu_resume) ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly bl el2_setup // if in EL2 drop to EL1 cleanly
mov x0, #ARM64_CPU_RUNTIME
bl __cpu_setup bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */ /* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir adrp x1, swapper_pg_dir
......
...@@ -408,30 +408,30 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings) ...@@ -408,30 +408,30 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
/* /*
* __cpu_setup * __cpu_setup
* *
* Initialise the processor for turning the MMU on. Return in x0 the * Initialise the processor for turning the MMU on.
* value of the SCTLR_EL1 register. *
* Input:
* x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME.
* Output:
* Return in x0 the value of the SCTLR_EL1 register.
*/ */
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup) SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB tlbi vmalle1 // Invalidate local TLB
dsb nsh dsb nsh
mov x0, #3 << 20 mov x1, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD msr cpacr_el1, x1 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0 msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now, isb // Unmask debug exceptions now,
enable_dbg // since this is per-cpu enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x0 // Disable PMU access from EL0 reset_pmuserenr_el0 x1 // Disable PMU access from EL0
/* /*
* Memory region attributes * Memory region attributes
*/ */
mov_q x5, MAIR_EL1_SET mov_q x5, MAIR_EL1_SET
msr mair_el1, x5 msr mair_el1, x5
/*
* Prepare SCTLR
*/
mov_q x0, SCTLR_EL1_SET
/* /*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel. * both user and kernel.
...@@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup) ...@@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup)
1: 1:
#endif /* CONFIG_ARM64_HW_AFDBM */ #endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10 msr tcr_el1, x10
/*
* Prepare SCTLR
*/
mov_q x0, SCTLR_EL1_SET
ret // return to head.S ret // return to head.S
SYM_FUNC_END(__cpu_setup) SYM_FUNC_END(__cpu_setup)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment