Commit 3fbd55ec authored by Russell King

Merge branch 'for-rmk/lpae' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable

Conflicts:
	arch/arm/kernel/smp.c

Please pull these miscellaneous LPAE fixes I've been collecting for a while
now for 3.11. They've been tested and reviewed by quite a few people, and most
of the patches are pretty trivial. -- Will Deacon.
parents b3f288de a469abd0
@@ -18,6 +18,8 @@
 #include <linux/types.h>
 #include <linux/sizes.h>
 
+#include <asm/cache.h>
+
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
@@ -141,6 +143,20 @@
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
 
+/*
+ * Minimum guaranteed alignment in pgd_alloc().  The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension.  This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE,
+ * but gives us about 38 bits or so.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT		0
+#endif
+#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
  * direct-mapped view.  We assume this is the first page
  * of RAM in the mem_map as well.
  */
-#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
...
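The new ARCH_PGD_SHIFT comment is the key to most of this series: page-table base addresses are handed between head.S and proc-*.S pre-shifted, so a 32-bit register can carry an LPAE physical address. A minimal standalone sketch of the arithmetic (helper names are hypothetical; ARCH_PGD_SHIFT taken as 6, the usual ARMv7 L1_CACHE_SHIFT):

```c
#include <stdint.h>

#define ARCH_PGD_SHIFT	6	/* assumed: L1_CACHE_SHIFT on typical ARMv7 cores */

/* boot side: pack an L1-cache-aligned pgd physical address into 32 bits */
static uint32_t pack_pgd(uint64_t pgd_phys)
{
	return (uint32_t)(pgd_phys >> ARCH_PGD_SHIFT);
}

/* secondary side: recover the full physical address */
static uint64_t unpack_pgd(uint32_t packed)
{
	return (uint64_t)packed << ARCH_PGD_SHIFT;
}

int main(void)
{
	uint64_t pgd = 0x2080003000ULL;	/* hypothetical LPAE address, bit 37 set */
	return unpack_pgd(pack_pgd(pgd)) == pgd ? 0 : 1;	/* round-trips */
}
```

32 register bits plus a 6-bit shift give 38 addressable bits, which is where the comment's "38 bits or so" comes from.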
@@ -13,7 +13,7 @@
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT		12
 #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
+#define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))
 
 #ifndef __ASSEMBLY__
...
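The PAGE_MASK change looks cosmetic but is not: PAGE_SIZE is an unsigned long, so on 32-bit ARM `~(PAGE_SIZE-1)` zero-extends when masking a 64-bit phys_addr_t and silently clears bits 32-39, while the int-typed `~((1 << PAGE_SHIFT) - 1)` is negative and sign-extends. A host-side demonstration, with uint32_t standing in for the 32-bit unsigned long:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys = 0x880000123ULL;			/* LPAE address above 4GB */
	uint32_t ul_mask  = ~(uint32_t)((1u << 12) - 1);	/* old ~(PAGE_SIZE-1): 0xfffff000 */
	int32_t  int_mask = ~((1 << 12) - 1);		/* new form: -4096 as an int */

	/* zero-extends: the high bits of the address are lost */
	printf("old mask: %#llx\n", (unsigned long long)(phys & ul_mask));
	/* sign-extends to 0xfffffffffffff000: the high bits survive */
	printf("new mask: %#llx\n", (unsigned long long)(phys & int_mask));
	return 0;
}
```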
@@ -83,4 +83,24 @@
 #define PHYS_MASK_SHIFT		(40)
 #define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
 
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ *   0x80000000: T0SZ = 0, T1SZ = 1
+ *   0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET	16			/* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET	(4096 * (1 + 3))	/* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET	0
+#endif
+
+#define TTBR1_SIZE	(((PAGE_OFFSET >> 30) - 1) << 16)
+
 #endif
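TTBR1_SIZE packs the TTBCR.T1SZ field (bits 18:16 in the LPAE TTBCR) straight from PAGE_OFFSET. A quick sketch of the values it yields for the three supported splits:

```c
#include <stdio.h>

int main(void)
{
	/* PAGE_OFFSET for VMSPLIT_1G, _2G and _3G respectively */
	unsigned long page_offset[] = { 0x40000000UL, 0x80000000UL, 0xc0000000UL };

	for (int i = 0; i < 3; i++) {
		unsigned long t1sz = (page_offset[i] >> 30) - 1;
		printf("PAGE_OFFSET %#010lx -> T1SZ = %lu -> TTBR1_SIZE = %#lx\n",
		       page_offset[i], t1sz, t1sz << 16);
	}
	return 0;
}
```

For the 1GB split T1SZ comes out as 0, matching the "(not used)" note above; TTBR1_OFFSET then nudges the TTBR1 base so the hardware walk starts at the entry covering PAGE_OFFSET.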
@@ -33,7 +33,7 @@
 #define PTRS_PER_PMD		512
 #define PTRS_PER_PGD		4
 
-#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS	(0)
 #define PTE_HWTABLE_OFF		(0)
 #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
 
@@ -48,16 +48,16 @@
 #define PMD_SHIFT		21
 
 #define PMD_SIZE		(1UL << PMD_SHIFT)
-#define PMD_MASK		(~(PMD_SIZE-1))
+#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
 #define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
 
 /*
  * section address mask and size definitions.
 */
 #define SECTION_SHIFT		21
 #define SECTION_SIZE		(1UL << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
+#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))
 
 #define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
...
@@ -60,7 +60,7 @@ extern struct processor {
 	/*
 	 * Set the page table
 	 */
-	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
 	/*
 	 * Set a possibly extended PTE.  Non-extended PTEs should
 	 * ignore 'ext'.
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
 extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 #ifdef CONFIG_ARM_LPAE
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
 #else
@@ -116,13 +116,25 @@ extern void cpu_resume(void);
 #define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)
 
 #ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr)					\
+	({							\
+		u64 ttbr;					\
+		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
+			: "=r" (ttbr));				\
+		ttbr;						\
+	})
+
+#define cpu_set_ttbr(nr, val)					\
+	do {							\
+		u64 ttbr = val;					\
+		__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\
+			: : "r" (ttbr));			\
+	} while (0)
+
 #define cpu_get_pgd()	\
 	({						\
-		unsigned long pg, pg2;			\
-		__asm__("mrrc	p15, 0, %0, %1, c2"	\
-			: "=r" (pg), "=r" (pg2)		\
-			:				\
-			: "cc");			\
+		u64 pg = cpu_get_ttbr(0);		\
 		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
 		(pgd_t *)phys_to_virt(pg);		\
 	})
...
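mrrc/mcrr move a 64-bit value through a general-purpose register pair, which is what lets cpu_get_ttbr()/cpu_set_ttbr() treat TTBRx as a plain u64; the %Q0/%R0 operand modifiers select the low and high word of that u64. A C model of what the rewritten cpu_get_pgd() computes from the raw register value (illustration only, constants assumed):

```c
#include <stdint.h>

#define PTRS_PER_PGD	4
#define PGD_ENTRY_SIZE	8	/* sizeof(pgd_t) with 64-bit descriptors */

/* strip the low alignment/attribute bits below the pgd table size,
 * leaving the table's physical base address */
static uint64_t pgd_base_from_ttbr(uint64_t ttbr)
{
	return ttbr & ~(uint64_t)(PTRS_PER_PGD * PGD_ENTRY_SIZE - 1);
}

int main(void)
{
	/* e.g. a TTBR0 value as cpu_get_ttbr(0) would return it,
	 * with made-up low attribute bits set */
	uint64_t ttbr0 = 0x880003000ULL | 0x1b;
	return pgd_base_from_ttbr(ttbr0) == 0x880003000ULL ? 0 : 1;
}
```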
@@ -25,6 +25,6 @@
 #define HWCAP_IDIVT	(1 << 18)
 #define HWCAP_VFPD32	(1 << 19)	/* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV	(HWCAP_IDIVA | HWCAP_IDIVT)
-
+#define HWCAP_LPAE	(1 << 20)
 
 #endif /* _UAPI__ASMARM_HWCAP_H */
@@ -156,7 +156,7 @@ ENDPROC(stext)
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
- *  r4 = physical page table address
+ *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address
@@ -331,6 +331,7 @@ __create_page_tables:
 #endif
 #ifdef CONFIG_ARM_LPAE
 	sub	r4, r4, #0x1000		@ point to the PGD table
+	mov	r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
 	mov	pc, lr
 ENDPROC(__create_page_tables)
@@ -408,7 +409,7 @@ __secondary_data:
 * r0  = cp#15 control register
 * r1  = machine ID
 * r2  = atags or dtb pointer
- * r4  = page table pointer
+ * r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 * r9  = processor ID
 * r13 = *virtual* address to jump to upon completion
 */
@@ -427,10 +428,7 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
 #endif
-#ifdef CONFIG_ARM_LPAE
-	mov	r5, #0
-	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
-#else
+#ifndef CONFIG_ARM_LPAE
 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
...
@@ -367,7 +367,7 @@ void __init early_print(const char *str, ...)
 
 static void __init cpuid_init_hwcaps(void)
 {
-	unsigned int divide_instrs;
+	unsigned int divide_instrs, vmsa;
 
 	if (cpu_architecture() < CPU_ARCH_ARMv7)
 		return;
@@ -380,6 +380,11 @@ static void __init cpuid_init_hwcaps(void)
 	case 1:
 		elf_hwcap |= HWCAP_IDIVT;
 	}
+
+	/* LPAE implies atomic ldrd/strd instructions */
+	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
+	if (vmsa >= 5)
+		elf_hwcap |= HWCAP_LPAE;
 }
 
 static void __init feat_v6_fixup(void)
@@ -892,6 +897,7 @@ static const char *hwcap_str[] = {
 	"vfpv4",
 	"idiva",
 	"idivt",
+	"lpae",
 	NULL
 };
...
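Once HWCAP_LPAE is exported, userspace can test for it the same way as any other ARM hwcap, either via the new "lpae" string in /proc/cpuinfo or through the auxiliary vector. A sketch (getauxval() is available from glibc 2.16 onward):

```c
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_LPAE
#define HWCAP_LPAE	(1 << 20)	/* mirrors the uapi hwcap.h hunk above */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("lpae (atomic ldrd/strd): %s\n",
	       (hwcap & HWCAP_LPAE) ? "present" : "absent");
	return 0;
}
```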
@@ -79,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops)
 		smp_ops = *ops;
 };
 
+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+	phys_addr_t pgdir = virt_to_phys(pgd);
+	BUG_ON(pgdir & ARCH_PGD_MASK);
+	return pgdir >> ARCH_PGD_SHIFT;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -93,8 +100,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 #endif
 
 #ifdef CONFIG_MMU
-	secondary_data.pgdir = virt_to_phys(idmap_pgd);
-	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
 #endif
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
...
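get_arch_pgd() is the C-side counterpart of the packing sketched after the memory.h hunk: the BUG_ON documents that pgd_alloc()'s L1-cache alignment is exactly what makes the shift lossless, and the unsigned long result is what lets secondary_data keep 32-bit fields while naming tables above 4GB. A self-contained restatement of the invariant (values hypothetical):

```c
#include <assert.h>
#include <stdint.h>

#define ARCH_PGD_SHIFT	6	/* assumed L1_CACHE_SHIFT, as before */
#define ARCH_PGD_MASK	((1 << ARCH_PGD_SHIFT) - 1)

/* model of get_arch_pgd(): note the 32-bit-friendly unsigned long result */
static unsigned long get_arch_pgd_model(uint64_t pgdir_phys)
{
	assert((pgdir_phys & ARCH_PGD_MASK) == 0);	/* the BUG_ON's invariant */
	return (unsigned long)(pgdir_phys >> ARCH_PGD_SHIFT);
}

int main(void)
{
	/* secondary_data.pgdir stays an unsigned long even for a >4GB table */
	return get_arch_pgd_model(0x880003000ULL) ? 0 : 1;
}
```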
@@ -20,6 +20,7 @@
 #include <asm/smp_plat.h>
 #include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
 
 /*
  * On ARMv6, we have the following structure in the Context ID:
@@ -55,17 +56,11 @@ static cpumask_t tlb_flush_pending;
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbl = __pa(swapper_pg_dir);
-	unsigned long ttbh = 0;
-
 	/*
 	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
 	 * ASID is set to 0.
 	 */
-	asm volatile(
-	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
-	:
-	: "r" (ttbl), "r" (ttbh));
+	cpu_set_ttbr(0, __pa(swapper_pg_dir));
 	isb();
 }
 #else
...
@@ -36,12 +36,13 @@
 
 #include "mm.h"
 
-static unsigned long phys_initrd_start __initdata = 0;
+static phys_addr_t phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
 
 static int __init early_initrd(char *p)
 {
-	unsigned long start, size;
+	phys_addr_t start;
+	unsigned long size;
 	char *endp;
 
 	start = memparse(p, &endp);
@@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size &&
 	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
 		phys_initrd_start = phys_initrd_size = 0;
 	}
 	if (phys_initrd_size &&
 	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
 		phys_initrd_start = phys_initrd_size = 0;
 	}
 	if (phys_initrd_size) {
@@ -442,7 +443,7 @@ static inline void
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
@@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * Convert to physical addresses, and
 	 * round start upwards and end downwards.
 	 */
-	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
-	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
 
 	/*
 	 * If there are free pages between these,
...
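The pr_err changes follow the usual kernel convention for printing phys_addr_t, which may be 32 or 64 bits wide depending on LPAE: cast to u64 and print with %llx so one format string serves both configurations. A standalone equivalent of the pattern:

```c
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr;	/* stand-in for phys_addr_t with LPAE enabled */

int main(void)
{
	phys_addr start = 0x880000000ULL;
	unsigned long size = 0x800000UL;

	/* always widen explicitly; %lx would be wrong when phys_addr_t is 64-bit */
	printf("INITRD: 0x%08llx+0x%08lx\n", (unsigned long long)start, size);
	return 0;
}
```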
@@ -673,7 +673,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-	unsigned long end, unsigned long phys, const struct mem_type *type)
+	unsigned long end, phys_addr_t phys,
+	const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
@@ -987,27 +988,28 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
+	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
-		*bank = meminfo.bank[i];
+		phys_addr_t size_limit;
 
-		if (bank->start > ULONG_MAX)
-			highmem = 1;
+		*bank = meminfo.bank[i];
+		size_limit = bank->size;
 
-#ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) >= vmalloc_min ||
-		    __va(bank->start) < (void *)PAGE_OFFSET)
+		if (bank->start >= vmalloc_limit)
 			highmem = 1;
+		else
+			size_limit = vmalloc_limit - bank->start;
 
 		bank->highmem = highmem;
 
+#ifdef CONFIG_HIGHMEM
 		/*
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (!highmem && __va(bank->start) < vmalloc_min &&
-		    bank->size > vmalloc_min - __va(bank->start)) {
+		if (!highmem && bank->size > size_limit) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 				       "ignoring high memory\n");
@@ -1016,16 +1018,14 @@ void __init sanity_check_meminfo(void)
 				       (meminfo.nr_banks - i) * sizeof(*bank));
 				meminfo.nr_banks++;
 				i++;
-				bank[1].size -= vmalloc_min - __va(bank->start);
-				bank[1].start = __pa(vmalloc_min - 1) + 1;
+				bank[1].size -= size_limit;
+				bank[1].start = vmalloc_limit;
 				bank[1].highmem = highmem = 1;
 				j++;
 			}
-			bank->size = vmalloc_min - __va(bank->start);
+			bank->size = size_limit;
 		}
 #else
-		bank->highmem = highmem;
-
 		/*
 		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
 		 */
@@ -1037,32 +1037,17 @@ void __init sanity_check_meminfo(void)
 			continue;
 		}
 
-		/*
-		 * Check whether this memory bank would entirely overlap
-		 * the vmalloc area.
-		 */
-		if (__va(bank->start) >= vmalloc_min ||
-		    __va(bank->start) < (void *)PAGE_OFFSET) {
-			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
-			       "(vmalloc region overlap).\n",
-			       (unsigned long long)bank->start,
-			       (unsigned long long)bank->start + bank->size - 1);
-			continue;
-		}
-
 		/*
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
-		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
-			unsigned long newsize = vmalloc_min - __va(bank->start);
+		if (bank->size > size_limit) {
 			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 			       "to -%.8llx (vmalloc region overlap).\n",
 			       (unsigned long long)bank->start,
 			       (unsigned long long)bank->start + bank->size - 1,
-			       (unsigned long long)bank->start + newsize - 1);
-			bank->size = newsize;
+			       (unsigned long long)bank->start + size_limit - 1);
+			bank->size = size_limit;
 		}
 #endif
 		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
...
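The rewritten sanity_check_meminfo() does all of its comparisons in phys_addr_t space, so a bank that starts beyond 4GB no longer wraps through __va(). A compressed model of the new split logic, with made-up numbers:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vmalloc_limit = 0x30000000;	/* stands in for __pa(vmalloc_min - 1) + 1 */
	uint64_t start = 0x20000000, size = 0x40000000;	/* one hypothetical 1GB bank */
	uint64_t size_limit = size;
	int highmem = 0;

	if (start >= vmalloc_limit)
		highmem = 1;			/* bank lies entirely above lowmem */
	else
		size_limit = vmalloc_limit - start;

	if (!highmem && size > size_limit)	/* partial overlap: split the bank */
		printf("lowmem %#llx @ %#llx, highmem %#llx @ %#llx\n",
		       (unsigned long long)size_limit, (unsigned long long)start,
		       (unsigned long long)(size - size_limit),
		       (unsigned long long)vmalloc_limit);
	return 0;
}
```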
@@ -39,6 +39,14 @@
 #define TTB_FLAGS_SMP	(TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
 #define PMD_FLAGS_SMP	(PMD_SECT_WBWA|PMD_SECT_S)
 
+#ifndef __ARMEB__
+#  define rpgdl	r0
+#  define rpgdh	r1
+#else
+#  define rpgdl	r1
+#  define rpgdh	r0
+#endif
+
 /*
 * cpu_v7_switch_mm(pgd_phys, tsk)
 *
@@ -47,10 +55,10 @@
 */
ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	mmid	r1, r1				@ get mm->context.id
-	asid	r3, r1
-	mov	r3, r3, lsl #(48 - 32)		@ ASID
-	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
+	mmid	r2, r2
+	asid	r2, r2
+	orr	rpgdh, rpgdh, r2, lsl #(48 - 32)	@ upper 32-bits of pgd
+	mcrr	p15, 0, rpgdl, rpgdh, c2	@ set TTB 0
 	isb
 #endif
 	mov	pc, lr
@@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext)
 */
 	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
 	ldr	\tmp, =swapper_pg_dir		@ swapper_pg_dir virtual address
-	cmp	\ttbr1, \tmp			@ PHYS_OFFSET > PAGE_OFFSET? (branch below)
+	mov	\tmp, \tmp, lsr #ARCH_PGD_SHIFT
+	cmp	\ttbr1, \tmp			@ PHYS_OFFSET > PAGE_OFFSET?
 	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control register
 	orr	\tmp, \tmp, #TTB_EAE
 	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP)
@@ -114,27 +123,21 @@ ENDPROC(cpu_v7_set_pte_ext)
 	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP << 16)
 	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP << 16)
 	/*
-	 * TTBR0/TTBR1 split (PAGE_OFFSET):
-	 *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
-	 *   0x80000000: T0SZ = 0, T1SZ = 1
-	 *   0xc0000000: T0SZ = 0, T1SZ = 2
-	 *
-	 * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
-	 * booting secondary CPUs would end up using TTBR1 for the identity
-	 * mapping set up in TTBR0.
+	 * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above),
+	 * otherwise booting secondary CPUs would end up using TTBR1 for the
+	 * identity mapping set up in TTBR0.
 	 */
-	bhi	9001f				@ PHYS_OFFSET > PAGE_OFFSET?
-	orr	\tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16)	@ TTBCR.T1SZ
-#if defined CONFIG_VMSPLIT_2G
-	/* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
-	add	\ttbr1, \ttbr1, #1 << 4		@ skip two L1 entries
-#elif defined CONFIG_VMSPLIT_3G
-	/* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
-	add	\ttbr1, \ttbr1, #4096 * (1 + 3)	@ only L2 used, skip pgd + 3*pmd
-#endif
-	/* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
-9001:	mcr	p15, 0, \tmp, c2, c0, 2		@ TTB control register
-	mcrr	p15, 1, \ttbr1, \zero, c2	@ load TTBR1
+	orrls	\tmp, \tmp, #TTBR1_SIZE		@ TTBCR.T1SZ
+	mcr	p15, 0, \tmp, c2, c0, 2		@ TTBCR
+	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
+	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
+	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
+	mcrr	p15, 1, \ttbr1, \zero, c2	@ load TTBR1
+	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
+	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits
+	mcrr	p15, 0, \ttbr0, \zero, c2	@ load TTBR0
+	mcrr	p15, 1, \ttbr1, \zero, c2	@ load TTBR1
+	mcrr	p15, 0, \ttbr0, \zero, c2	@ load TTBR0
 	.endm
 
 __CPUINIT
...
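The rpgdl/rpgdh aliases exist because cpu_v7_switch_mm() now takes a 64-bit pgd_phys: under the AAPCS a u64 argument arrives in the r0/r1 pair, and which register holds the low word flips with endianness, hence the __ARMEB__ swap. A host-side illustration of the split the assembly relies on:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pgd_phys = 0x0000000880003000ULL;	/* hypothetical pgd address */

	uint32_t lo = (uint32_t)pgd_phys;		/* rpgdl: r0 on LE, r1 on BE */
	uint32_t hi = (uint32_t)(pgd_phys >> 32);	/* rpgdh: r1 on LE, r0 on BE */

	/* the ASID is then OR-ed into bits 55:48 of the 64-bit TTBR value,
	 * i.e. starting at bit 16 of 'hi', mirroring the orr above */
	uint32_t asid = 5;
	hi |= asid << (48 - 32);

	printf("rpgdl=%#010x rpgdh=%#010x\n", lo, hi);
	return 0;
}
```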