Commit 9cfb541a authored by Vladimir Murzin's avatar Vladimir Murzin Committed by Russell King

ARM: 8754/1: NOMMU: Move PMSAv7 MPU under its own namespace

We are going to support a different MPU whose programming model is not
compatible with PMSAv7, so move the PMSAv7 MPU under its own namespace.
Tested-by: default avatarSzemző András <sza@esh.hu>
Tested-by: default avatarAlexandre TORGUE <alexandre.torgue@st.com>
Signed-off-by: default avatarVladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: default avatarRussell King <rmk+kernel@armlinux.org.uk>
parent e7229f7d
...@@ -14,50 +14,50 @@ ...@@ -14,50 +14,50 @@
#define MMFR0_PMSAv7 (3 << 4) #define MMFR0_PMSAv7 (3 << 4)
/* MPU D/I Size Register fields */ /* MPU D/I Size Register fields */
#define MPU_RSR_SZ 1 #define PMSAv7_RSR_SZ 1
#define MPU_RSR_EN 0 #define PMSAv7_RSR_EN 0
#define MPU_RSR_SD 8 #define PMSAv7_RSR_SD 8
/* Number of subregions (SD) */ /* Number of subregions (SD) */
#define MPU_NR_SUBREGS 8 #define PMSAv7_NR_SUBREGS 8
#define MPU_MIN_SUBREG_SIZE 256 #define PMSAv7_MIN_SUBREG_SIZE 256
/* The D/I RSR value for an enabled region spanning the whole of memory */ /* The D/I RSR value for an enabled region spanning the whole of memory */
#define MPU_RSR_ALL_MEM 63 #define PMSAv7_RSR_ALL_MEM 63
/* Individual bits in the DR/IR ACR */ /* Individual bits in the DR/IR ACR */
#define MPU_ACR_XN (1 << 12) #define PMSAv7_ACR_XN (1 << 12)
#define MPU_ACR_SHARED (1 << 2) #define PMSAv7_ACR_SHARED (1 << 2)
/* C, B and TEX[2:0] bits only have semantic meanings when grouped */ /* C, B and TEX[2:0] bits only have semantic meanings when grouped */
#define MPU_RGN_CACHEABLE 0xB #define PMSAv7_RGN_CACHEABLE 0xB
#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) #define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
#define MPU_RGN_STRONGLY_ORDERED 0 #define PMSAv7_RGN_STRONGLY_ORDERED 0
/* Main region should only be shared for SMP */ /* Main region should only be shared for SMP */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) #define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
#else #else
#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE #define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE
#endif #endif
/* Access permission bits of ACR (only define those that we use)*/ /* Access permission bits of ACR (only define those that we use)*/
#define MPU_AP_PL1RO_PL0NA (0x5 << 8) #define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8)
#define MPU_AP_PL1RW_PL0RW (0x3 << 8) #define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8)
#define MPU_AP_PL1RW_PL0R0 (0x2 << 8) #define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8)
#define MPU_AP_PL1RW_PL0NA (0x1 << 8) #define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8)
/* For minimal static MPU region configurations */ /* For minimal static MPU region configurations */
#define MPU_PROBE_REGION 0 #define PMSAv7_PROBE_REGION 0
#define MPU_BG_REGION 1 #define PMSAv7_BG_REGION 1
#define MPU_RAM_REGION 2 #define PMSAv7_RAM_REGION 2
#define MPU_ROM_REGION 3 #define PMSAv7_ROM_REGION 3
/* Maximum number of regions Linux is interested in */ /* Maximum number of regions Linux is interested in */
#define MPU_MAX_REGIONS 16 #define MPU_MAX_REGIONS 16
#define MPU_DATA_SIDE 0 #define PMSAv7_DATA_SIDE 0
#define MPU_INSTR_SIDE 1 #define PMSAv7_INSTR_SIDE 1
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -75,16 +75,12 @@ struct mpu_rgn_info { ...@@ -75,16 +75,12 @@ struct mpu_rgn_info {
extern struct mpu_rgn_info mpu_rgn_info; extern struct mpu_rgn_info mpu_rgn_info;
#ifdef CONFIG_ARM_MPU #ifdef CONFIG_ARM_MPU
extern void __init pmsav7_adjust_lowmem_bounds(void);
extern void __init adjust_lowmem_bounds_mpu(void); extern void __init pmsav7_setup(void);
extern void __init mpu_setup(void);
#else #else
static inline void pmsav7_adjust_lowmem_bounds(void) {};
static inline void adjust_lowmem_bounds_mpu(void) {} static inline void pmsav7_setup(void) {};
static inline void mpu_setup(void) {} #endif
#endif /* !CONFIG_ARM_MPU */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -64,9 +64,9 @@ ...@@ -64,9 +64,9 @@
#define MPU_CTRL_ENABLE 1 #define MPU_CTRL_ENABLE 1
#define MPU_CTRL_PRIVDEFENA (1 << 2) #define MPU_CTRL_PRIVDEFENA (1 << 2)
#define MPU_RNR 0x98 #define PMSAv7_RNR 0x98
#define MPU_RBAR 0x9c #define PMSAv7_RBAR 0x9c
#define MPU_RASR 0xa0 #define PMSAv7_RASR 0xa0
/* Cache opeartions */ /* Cache opeartions */
#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
......
...@@ -194,9 +194,9 @@ int main(void) ...@@ -194,9 +194,9 @@ int main(void)
DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used)); DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn)); DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn));
DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
#endif #endif
return 0; return 0;
} }
...@@ -68,14 +68,6 @@ ENTRY(stext) ...@@ -68,14 +68,6 @@ ENTRY(stext)
beq __error_p @ yes, error 'p' beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_MPU #ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
bl __setup_mpu bl __setup_mpu
#endif #endif
...@@ -110,8 +102,6 @@ ENTRY(secondary_startup) ...@@ -110,8 +102,6 @@ ENTRY(secondary_startup)
ldr r7, __secondary_data ldr r7, __secondary_data
#ifdef CONFIG_ARM_MPU #ifdef CONFIG_ARM_MPU
/* Use MPU region info supplied by __cpu_up */
ldr r6, [r7] @ get secondary_data.mpu_rgn_info
bl __secondary_setup_mpu @ Initialize the MPU bl __secondary_setup_mpu @ Initialize the MPU
#endif #endif
...@@ -184,7 +174,7 @@ ENDPROC(__after_proc_init) ...@@ -184,7 +174,7 @@ ENDPROC(__after_proc_init)
.endm .endm
/* Setup a single MPU region, either D or I side (D-side for unified) */ /* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused .macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
...@@ -192,14 +182,14 @@ ENDPROC(__after_proc_init) ...@@ -192,14 +182,14 @@ ENDPROC(__after_proc_init)
#else #else
.macro set_region_nr tmp, rgnr, base .macro set_region_nr tmp, rgnr, base
mov \tmp, \rgnr mov \tmp, \rgnr
str \tmp, [\base, #MPU_RNR] str \tmp, [\base, #PMSAv7_RNR]
.endm .endm
.macro setup_region bar, acr, sr, unused, base .macro setup_region bar, acr, sr, unused, base
lsl \acr, \acr, #16 lsl \acr, \acr, #16
orr \acr, \acr, \sr orr \acr, \acr, \sr
str \bar, [\base, #MPU_RBAR] str \bar, [\base, #PMSAv7_RBAR]
str \acr, [\base, #MPU_RASR] str \acr, [\base, #PMSAv7_RASR]
.endm .endm
#endif #endif
...@@ -210,7 +200,7 @@ ENDPROC(__after_proc_init) ...@@ -210,7 +200,7 @@ ENDPROC(__after_proc_init)
* Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
* Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
* *
* r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/ */
ENTRY(__setup_mpu) ENTRY(__setup_mpu)
...@@ -223,7 +213,20 @@ AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0 ...@@ -223,7 +213,20 @@ AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
M_CLASS(ldr r0, [r12, 0x50]) M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7 teq r0, #(MMFR0_PMSAv7) @ PMSA v7
bxne lr beq __setup_pmsa_v7
ret lr
ENDPROC(__setup_mpu)
ENTRY(__setup_pmsa_v7)
/* Calculate the size of a region covering just the kernel */
ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
/* Determine whether the D/I-side memory map is unified. We set the /* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */ * flags here and continue to use them for the rest of this function */
...@@ -234,47 +237,47 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE]) ...@@ -234,47 +237,47 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
/* Setup second region first to free up r6 */ /* Setup second region first to free up r6 */
set_region_nr r0, #MPU_RAM_REGION, r12 set_region_nr r0, #PMSAv7_RAM_REGION, r12
isb isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
beq 1f @ Memory-map not unified beq 1f @ Memory-map not unified
setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb 1: isb
/* First/background region */ /* First/background region */
set_region_nr r0, #MPU_BG_REGION, r12 set_region_nr r0, #PMSAv7_BG_REGION, r12
isb isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0 mov r0, #0 @ BG region starts at 0x0
ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
beq 2f @ Memory-map not unified beq 2f @ Memory-map not unified
setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled
2: isb 2: isb
#ifdef CONFIG_XIP_KERNEL #ifdef CONFIG_XIP_KERNEL
set_region_nr r0, #MPU_ROM_REGION, r12 set_region_nr r0, #PMSAv7_ROM_REGION, r12
isb isb
ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL) ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
ldr r6, =(_exiprom) @ ROM end ldr r6, =(_exiprom) @ ROM end
sub r6, r6, r0 @ Minimum size of region to map sub r6, r6, r0 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N... clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size rsb r6, r6, #31 @ ...so round up region size
lsl r6, r6, #MPU_RSR_SZ @ Put size in right field lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
beq 3f @ Memory-map not unified beq 3f @ Memory-map not unified
setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3: isb 3: isb
#endif #endif
...@@ -291,7 +294,7 @@ M_CLASS(str r0, [r12, #MPU_CTRL]) ...@@ -291,7 +294,7 @@ M_CLASS(str r0, [r12, #MPU_CTRL])
isb isb
ret lr ret lr
ENDPROC(__setup_mpu) ENDPROC(__setup_pmsa_v7)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
...@@ -299,12 +302,21 @@ ENDPROC(__setup_mpu) ...@@ -299,12 +302,21 @@ ENDPROC(__setup_mpu)
*/ */
ENTRY(__secondary_setup_mpu) ENTRY(__secondary_setup_mpu)
/* Use MPU region info supplied by __cpu_up */
ldr r6, [r7] @ get secondary_data.mpu_rgn_info
/* Probe for v7 PMSA compliance */ /* Probe for v7 PMSA compliance */
mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
and r0, r0, #(MMFR0_PMSA) @ PMSA field and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7 teq r0, #(MMFR0_PMSAv7) @ PMSA v7
bne __error_p beq __secondary_setup_pmsa_v7
b __error_p
ENDPROC(__secondary_setup_mpu)
/*
* r6: pointer at mpu_rgn_info
*/
ENTRY(__secondary_setup_pmsa_v7)
/* Determine whether the D/I-side memory map is unified. We set the /* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */ * flags here and continue to use them for the rest of this function */
mrc p15, 0, r0, c0, c0, 4 @ MPUIR mrc p15, 0, r0, c0, c0, 4 @ MPUIR
...@@ -328,9 +340,9 @@ ENTRY(__secondary_setup_mpu) ...@@ -328,9 +340,9 @@ ENTRY(__secondary_setup_mpu)
ldr r6, [r3, #MPU_RGN_DRSR] ldr r6, [r3, #MPU_RGN_DRSR]
ldr r5, [r3, #MPU_RGN_DRACR] ldr r5, [r3, #MPU_RGN_DRACR]
setup_region r0, r5, r6, MPU_DATA_SIDE setup_region r0, r5, r6, PMSAv7_DATA_SIDE
beq 2f beq 2f
setup_region r0, r5, r6, MPU_INSTR_SIDE setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2: isb 2: isb
mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
...@@ -345,7 +357,7 @@ ENTRY(__secondary_setup_mpu) ...@@ -345,7 +357,7 @@ ENTRY(__secondary_setup_mpu)
isb isb
ret lr ret lr
ENDPROC(__secondary_setup_mpu) ENDPROC(__secondary_setup_pmsa_v7)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */ #endif /* CONFIG_ARM_MPU */
......
...@@ -99,6 +99,32 @@ void __init arm_mm_memblock_reserve(void) ...@@ -99,6 +99,32 @@ void __init arm_mm_memblock_reserve(void)
memblock_reserve(0, 1); memblock_reserve(0, 1);
} }
static void __init adjust_lowmem_bounds_mpu(void)
{
unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
switch (pmsa) {
case MMFR0_PMSAv7:
pmsav7_adjust_lowmem_bounds();
break;
default:
break;
}
}
static void __init mpu_setup(void)
{
unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
switch (pmsa) {
case MMFR0_PMSAv7:
pmsav7_setup();
break;
default:
break;
}
}
void __init adjust_lowmem_bounds(void) void __init adjust_lowmem_bounds(void)
{ {
phys_addr_t end; phys_addr_t end;
......
...@@ -102,7 +102,7 @@ static inline u32 irbar_read(void) ...@@ -102,7 +102,7 @@ static inline u32 irbar_read(void)
static inline void rgnr_write(u32 v) static inline void rgnr_write(u32 v)
{ {
writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR); writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
} }
/* Data-side / unified region attributes */ /* Data-side / unified region attributes */
...@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v) ...@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v)
/* Region access control register */ /* Region access control register */
static inline void dracr_write(u32 v) static inline void dracr_write(u32 v)
{ {
u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0); u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR); writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
} }
/* Region size register */ /* Region size register */
static inline void drsr_write(u32 v) static inline void drsr_write(u32 v)
{ {
u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16); u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR); writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
} }
/* Region base address register */ /* Region base address register */
static inline void drbar_write(u32 v) static inline void drbar_write(u32 v)
{ {
writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR); writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
} }
static inline u32 drbar_read(void) static inline u32 drbar_read(void)
{ {
return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR); return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
} }
/* ARMv7-M only supports a unified MPU, so I-side operations are nop */ /* ARMv7-M only supports a unified MPU, so I-side operations are nop */
...@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;} ...@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;}
#endif #endif
static int __init mpu_present(void)
{
return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{ {
unsigned long subreg, bslots, sslots; unsigned long subreg, bslots, sslots;
...@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r ...@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
bdiff = base - abase; bdiff = base - abase;
sdiff = p2size - asize; sdiff = p2size - asize;
subreg = p2size / MPU_NR_SUBREGS; subreg = p2size / PMSAv7_NR_SUBREGS;
if ((bdiff % subreg) || (sdiff % subreg)) if ((bdiff % subreg) || (sdiff % subreg))
return false; return false;
...@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r ...@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
if (bslots || sslots) { if (bslots || sslots) {
int i; int i;
if (subreg < MPU_MIN_SUBREG_SIZE) if (subreg < PMSAv7_MIN_SUBREG_SIZE)
return false; return false;
if (bslots + sslots > MPU_NR_SUBREGS) if (bslots + sslots > PMSAv7_NR_SUBREGS)
return false; return false;
for (i = 0; i < bslots; i++) for (i = 0; i < bslots; i++)
_set_bit(i, &region->subreg); _set_bit(i, &region->subreg);
for (i = 1; i <= sslots; i++) for (i = 1; i <= sslots; i++)
_set_bit(MPU_NR_SUBREGS - i, &region->subreg); _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
} }
region->base = abase; region->base = abase;
...@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size, ...@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
} }
/* MPU initialisation functions */ /* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void) void __init pmsav7_adjust_lowmem_bounds(void)
{ {
phys_addr_t specified_mem_size = 0, total_mem_size = 0; phys_addr_t specified_mem_size = 0, total_mem_size = 0;
struct memblock_region *reg; struct memblock_region *reg;
...@@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void) ...@@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void)
unsigned int mem_max_regions; unsigned int mem_max_regions;
int num, i; int num, i;
if (!mpu_present()) /* Free-up PMSAv7_PROBE_REGION */
return;
/* Free-up MPU_PROBE_REGION */
mpu_min_region_order = __mpu_min_region_order(); mpu_min_region_order = __mpu_min_region_order();
/* How many regions are supported */ /* How many regions are supported */
...@@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void) ...@@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void)
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
unsigned long subreg = mem[i].size / MPU_NR_SUBREGS; unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;
total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg); total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);
pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n", pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
&mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg); &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
} }
if (total_mem_size != specified_mem_size) { if (total_mem_size != specified_mem_size) {
...@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void) ...@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void)
u32 drbar_result, irbar_result; u32 drbar_result, irbar_result;
/* We've kept a region free for this probing */ /* We've kept a region free for this probing */
rgnr_write(MPU_PROBE_REGION); rgnr_write(PMSAv7_PROBE_REGION);
isb(); isb();
/* /*
* As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
...@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start, ...@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
return -ENOMEM; return -ENOMEM;
/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
size_data |= subregions << MPU_RSR_SD; size_data |= subregions << PMSAv7_RSR_SD;
if (need_flush) if (need_flush)
flush_cache_all(); flush_cache_all();
...@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start, ...@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
/* /*
* Set up default MPU regions, doing nothing if there is no MPU * Set up default MPU regions, doing nothing if there is no MPU
*/ */
void __init mpu_setup(void) void __init pmsav7_setup(void)
{ {
int i, region = 0, err = 0; int i, region = 0, err = 0;
if (!mpu_present())
return;
/* Setup MPU (order is important) */ /* Setup MPU (order is important) */
/* Background */ /* Background */
err |= mpu_setup_region(region++, 0, 32, err |= mpu_setup_region(region++, 0, 32,
MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW, PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
0, false); 0, false);
#ifdef CONFIG_XIP_KERNEL #ifdef CONFIG_XIP_KERNEL
...@@ -448,13 +437,13 @@ void __init mpu_setup(void) ...@@ -448,13 +437,13 @@ void __init mpu_setup(void)
* with BG region (which is uncachable), thus we need * with BG region (which is uncachable), thus we need
* to clean and invalidate cache. * to clean and invalidate cache.
*/ */
bool need_flush = region == MPU_RAM_REGION; bool need_flush = region == PMSAv7_RAM_REGION;
if (!xip[i].size) if (!xip[i].size)
continue; continue;
err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL, PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
xip[i].subreg, need_flush); xip[i].subreg, need_flush);
} }
#endif #endif
...@@ -465,14 +454,14 @@ void __init mpu_setup(void) ...@@ -465,14 +454,14 @@ void __init mpu_setup(void)
continue; continue;
err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL, PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
mem[i].subreg, false); mem[i].subreg, false);
} }
/* Vectors */ /* Vectors */
#ifndef CONFIG_CPU_V7M #ifndef CONFIG_CPU_V7M
err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL, PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
0, false); 0, false);
#endif #endif
if (err) { if (err) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment