Commit f1a2481c authored by Santosh Shilimkar, committed by Russell King

ARM: 6407/1: mmu: Setup MT_MEMORY and MT_MEMORY_NONCACHED L1 entries

This patch populates the L1 entries for the MT_MEMORY and MT_MEMORY_NONCACHED
types so that, at boot-up, memory outside system memory can be mapped at
page-level granularity.

Previously the mapping was limited to section level, which creates
unnecessary additional mappings for which physical memory may not be
present. On newer ARMs with speculative prefetch, this is dangerous and
can result in untraceable aborts.
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent f933b87e
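As background for the granularity argument above, the arithmetic below is a small
self-contained sketch (not part of the patch; the start address and size are invented
for illustration) comparing how far a section-level mapping of an odd-sized region
extends past its end versus a page-level mapping:

#include <stdio.h>

#define SECTION_SIZE (1UL << 20)   /* 1 MiB ARM first-level section */
#define SMALL_PAGE   (1UL << 12)   /* 4 KiB second-level small page */

/* Round the end of a region up to the next mapping-granule boundary. */
static unsigned long round_up_to(unsigned long x, unsigned long granule)
{
	return (x + granule - 1) & ~(granule - 1);
}

int main(void)
{
	/* Hypothetical on-chip memory outside system RAM: 56 KiB at 0x40300000. */
	unsigned long start = 0x40300000UL;
	unsigned long size  = 0x0000E000UL;
	unsigned long end   = start + size;

	printf("section mapping over-maps %lu KiB past the region\n",
	       (round_up_to(end, SECTION_SIZE) - end) / 1024);
	printf("page mapping over-maps    %lu KiB past the region\n",
	       (round_up_to(end, SMALL_PAGE) - end) / 1024);
	return 0;
}

With a section-only mapping, the extra 968 KiB gets first-level entries even though no
physical memory backs it, which is exactly what a speculative access can then hit.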
arch/arm/mm/mmu.c

@@ -247,6 +247,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -255,6 +258,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -412,9 +418,12 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -439,7 +448,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
 	}
 
@@ -476,6 +487,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
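The prot_l1 and prot_pte values introduced above are what allow these memory types to
be mapped through a second-level page table instead of forcing a 1 MiB section. The
following is an illustrative sketch only, not the kernel's actual mapping code: the
struct layout, helper names and bit values are hypothetical stand-ins, and the helpers
merely print which kind of descriptor would be written so the granularity choice is
visible.

#include <stdio.h>
#include <inttypes.h>

#define SECTION_SIZE (1u << 20)   /* 1 MiB ARM section */
#define SMALL_PAGE   (1u << 12)   /* 4 KiB small page  */

/* Cut-down stand-in for the kernel's struct mem_type. */
struct mem_type {
	uint32_t prot_pte;   /* bits for each second-level (page) entry */
	uint32_t prot_l1;    /* L1 bits when pointing at a page table   */
	uint32_t prot_sect;  /* L1 bits for a 1 MiB section mapping     */
};

/* Hypothetical page-table writers: they only report what would be written. */
static void write_l1_section(uint32_t virt, uint32_t prot)
{
	printf("L1[%08" PRIx32 "]: section   prot=%" PRIx32 " (maps 1 MiB)\n", virt, prot);
}

static void write_l1_table(uint32_t virt, uint32_t prot)
{
	printf("L1[%08" PRIx32 "]: pagetable prot=%" PRIx32 "\n", virt, prot);
}

static void write_l2_page(uint32_t virt, uint32_t prot)
{
	printf("  L2[%08" PRIx32 "]: page      prot=%" PRIx32 " (maps 4 KiB)\n", virt, prot);
}

/*
 * Map [virt, virt + size) using sections where a full, aligned 1 MiB is
 * available and small pages otherwise, so nothing beyond the region is
 * ever mapped.
 */
static void map_region(uint32_t virt, uint32_t size, const struct mem_type *t)
{
	uint32_t end = virt + size;

	while (virt < end) {
		if ((virt % SECTION_SIZE) == 0 && end - virt >= SECTION_SIZE) {
			write_l1_section(virt, t->prot_sect);
			virt += SECTION_SIZE;
		} else {
			uint32_t next = (virt & ~(SECTION_SIZE - 1)) + SECTION_SIZE;
			uint32_t stop = end < next ? end : next;

			write_l1_table(virt, t->prot_l1);
			for (; virt < stop; virt += SMALL_PAGE)
				write_l2_page(virt, t->prot_pte);
		}
	}
}

int main(void)
{
	/* Placeholder bit values, not the real MT_MEMORY protection bits. */
	const struct mem_type mt_memory = {
		.prot_pte = 0x1, .prot_l1 = 0x2, .prot_sect = 0x3,
	};

	map_region(0x40000000u, 0x0010E000u, &mt_memory);	/* 1 MiB + 56 KiB */
	return 0;
}

Mapping a 1 MiB + 56 KiB region with it prints one section entry (from prot_sect), then
one L1 page-table entry (from prot_l1) followed by fourteen 4 KiB page entries (from
prot_pte), stopping exactly at the end of the region rather than at the next section
boundary, which is the behaviour this patch enables for MT_MEMORY and
MT_MEMORY_NONCACHED.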