Commit e4707dd3 authored by Paul Walmsley, committed by Russell King

[ARM] 5422/1: ARM: MMU: add a Non-cacheable Normal executable memory type

This patch adds a Non-cacheable Normal ARM executable memory type,
MT_MEMORY_NONCACHED.

On OMAP3, this is used for rapid dynamic voltage/frequency scaling in
the VDD2 voltage domain. OMAP3's SDRAM controller (SDRC) is in the
VDD2 voltage domain, and its clock frequency must change along with
voltage. The SDRC clock change code cannot run from SDRAM itself,
since SDRAM accesses are paused during the clock change. So the
current implementation of the DVFS code executes from OMAP on-chip
SRAM, aka "OCM RAM."

If the OCM RAM pages are marked as Cacheable, the ARM cache controller
will attempt to flush dirty cache lines to the SDRC, so it can fill
those lines with OCM RAM instruction code. The problem is that the
SDRC is paused during DVFS, and so any SDRAM access causes the ARM MPU
subsystem to hang.

TI's original solution to this problem was to mark the OCM RAM
sections as Strongly Ordered memory, thus preventing caching. This is
overkill: since the memory is marked as non-bufferable, OCM RAM writes
become needlessly slow. The idea of "Strongly Ordered SRAM" is also
conceptually disturbing. Previous LAKML list discussion is here:

http://www.spinics.net/lists/arm-kernel/msg54312.html

This memory type MT_MEMORY_NONCACHED is used for OCM RAM by a future
patch.
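
For illustration only (this is not part of the commit, and the addresses and size below are made-up placeholders rather than the OMAP3 values), a platform could hand its on-chip SRAM to the new type through an ordinary static mapping, using the struct map_desc / iotable_init() interface visible in the header hunk below:

/*
 * Illustrative sketch only -- not from the OMAP3 patches.  The
 * virtual address, physical address and size are placeholders.
 */
static struct map_desc board_sram_desc[] __initdata = {
	{
		.virtual	= 0xfe400000,			/* placeholder VA */
		.pfn		= __phys_to_pfn(0x40200000),	/* placeholder PA */
		.length		= SZ_64K,			/* placeholder size */
		.type		= MT_MEMORY_NONCACHED,
	},
};

static void __init board_map_sram(void)
{
	iotable_init(board_sram_desc, ARRAY_SIZE(board_sram_desc));
}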

Cc: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 6dc4a47a
@@ -26,6 +26,7 @@ struct map_desc {
 #define MT_HIGH_VECTORS 8
 #define MT_MEMORY 9
 #define MT_ROM 10
+#define MT_MEMORY_NONCACHED 11
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
...
@@ -243,6 +243,10 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -406,9 +410,28 @@ static void __init build_mem_type_table(void)
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
...
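
As a reader's note (not part of the patch), the small standalone program below decodes which TEX/C/B section attributes each branch in build_mem_type_table() above ends up requesting for MT_MEMORY_NONCACHED. The bit positions follow the ARM short-descriptor section format (B is bit 2, C is bit 3, TEX is bits 14:12); the helper macros are local to this sketch, not kernel definitions, and it compiles with any C compiler without kernel headers.

/* Reader's illustration; mirrors, but does not reuse, pgtable-hwdef.h. */
#include <stdio.h>

#define SECT_B(x)	(((x) >> 2) & 1UL)	/* bufferable bit */
#define SECT_C(x)	(((x) >> 3) & 1UL)	/* cacheable bit */
#define SECT_TEX(x)	(((x) >> 12) & 7UL)	/* TEX[2:0] field */

static void decode(const char *when, unsigned long prot_sect)
{
	printf("%-32s TEX=%lu C=%lu B=%lu\n", when,
	       SECT_TEX(prot_sect), SECT_C(prot_sect), SECT_B(prot_sect));
}

int main(void)
{
	decode("pre-ARMv6 (PMD_SECT_BUFFERABLE)", 1UL << 2);   /* uncached, bufferable */
	decode("ARMv6/v7 without TEX remap",      1UL << 12);  /* TEX=001: Normal, Non-cacheable */
	decode("ARMv7 with TEX remap (XCB=001)",  1UL << 2);   /* remapped via PRRR/NMRR */
	return 0;
}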