Commit 3ee0fc5c authored by Russell King's avatar Russell King

Merge branch 'kexec/idmap' of...

Merge branch 'kexec/idmap' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
parents deee6d53 4e8ee7de
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
#include <asm/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
/*
 * Statically allocated page-table root holding a 1:1 (virtual == physical)
 * mapping of the .idmap.text section.  Built once at early init and then
 * handed to code that runs while the MMU is being turned on or off
 * (secondary CPU bring-up, suspend/resume, soft reboot).
 */
extern pgd_t *idmap_pgd;
/*
 * Switch the calling CPU onto the identity mapping (and flush the TLB) so
 * that a subsequent soft reboot / cpu_reset has predictable behaviour once
 * the MMU is disabled.
 */
void setup_mm_for_reboot(void);
#endif /* __ASM_IDMAP_H */
...@@ -347,9 +347,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ...@@ -347,9 +347,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgtable_cache_init() do { } while (0) #define pgtable_cache_init() do { } while (0)
void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
......
...@@ -170,11 +170,11 @@ __create_page_tables: ...@@ -170,11 +170,11 @@ __create_page_tables:
* Create identity mapping to cater for __enable_mmu. * Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init(). * This identity mapping will be removed by paging_init().
*/ */
adr r0, __enable_mmu_loc adr r0, __turn_mmu_on_loc
ldmia r0, {r3, r5, r6} ldmia r0, {r3, r5, r6}
sub r0, r0, r3 @ virt->phys offset sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __enable_mmu add r5, r5, r0 @ phys __turn_mmu_on
add r6, r6, r0 @ phys __enable_mmu_end add r6, r6, r0 @ phys __turn_mmu_on_end
mov r5, r5, lsr #SECTION_SHIFT mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT mov r6, r6, lsr #SECTION_SHIFT
...@@ -287,10 +287,10 @@ __create_page_tables: ...@@ -287,10 +287,10 @@ __create_page_tables:
ENDPROC(__create_page_tables) ENDPROC(__create_page_tables)
.ltorg .ltorg
.align .align
__enable_mmu_loc: __turn_mmu_on_loc:
.long . .long .
.long __enable_mmu .long __turn_mmu_on
.long __enable_mmu_end .long __turn_mmu_on_end
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
__CPUINIT __CPUINIT
...@@ -398,15 +398,17 @@ ENDPROC(__enable_mmu) ...@@ -398,15 +398,17 @@ ENDPROC(__enable_mmu)
* other registers depend on the function called upon completion * other registers depend on the function called upon completion
*/ */
.align 5 .align 5
__turn_mmu_on: .pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0 mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3 mov r3, r3
mov r3, r13 mov r3, r13
mov pc, r3 mov pc, r3
__enable_mmu_end: __turn_mmu_on_end:
ENDPROC(__turn_mmu_on) ENDPROC(__turn_mmu_on)
.popsection
#ifdef CONFIG_SMP_ON_UP #ifdef CONFIG_SMP_ON_UP
......
...@@ -54,6 +54,7 @@ ENDPROC(cpu_suspend_abort) ...@@ -54,6 +54,7 @@ ENDPROC(cpu_suspend_abort)
* r0 = control register value * r0 = control register value
*/ */
.align 5 .align 5
.pushsection .idmap.text,"ax"
ENTRY(cpu_resume_mmu) ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu ldr r3, =cpu_resume_after_mmu
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
...@@ -62,6 +63,7 @@ ENTRY(cpu_resume_mmu) ...@@ -62,6 +63,7 @@ ENTRY(cpu_resume_mmu)
mov r0, r0 mov r0, r0
mov pc, r3 @ jump to virtual address mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu) ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu: cpu_resume_after_mmu:
bl cpu_init @ restore the und/abt/irq banked regs bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success mov r0, #0 @ return zero on success
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/exception.h> #include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h> #include <asm/topology.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle; struct task_struct *idle = ci->idle;
pgd_t *pgd;
int ret; int ret;
/* /*
...@@ -83,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -83,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
init_idle(idle, cpu); init_idle(idle, cpu);
} }
/*
* Allocate initial page tables to allow the new CPU to
* enable the MMU safely. This essentially means a set
* of our "standard" page tables, with the addition of
* a 1:1 mapping for the physical address of the kernel.
*/
pgd = pgd_alloc(&init_mm);
if (!pgd)
return -ENOMEM;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
}
/* /*
* We need to tell the secondary core where to find * We need to tell the secondary core where to find
* its stack and the page tables. * its stack and the page tables.
*/ */
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd); secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
...@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.stack = NULL; secondary_data.stack = NULL;
secondary_data.pgdir = 0; secondary_data.pgdir = 0;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
}
pgd_free(&init_mm, pgd);
return ret; return ret;
} }
......
#include <linux/init.h> #include <linux/init.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
static pgd_t *suspend_pgd;
extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
extern void cpu_resume_mmu(void); extern void cpu_resume_mmu(void);
...@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) ...@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
*save_ptr = virt_to_phys(ptr); *save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */ /* This must correspond to the LDM in cpu_resume() assembly */
*ptr++ = virt_to_phys(suspend_pgd); *ptr++ = virt_to_phys(idmap_pgd);
*ptr++ = sp; *ptr++ = sp;
*ptr++ = virt_to_phys(cpu_do_resume); *ptr++ = virt_to_phys(cpu_do_resume);
...@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct mm_struct *mm = current->active_mm; struct mm_struct *mm = current->active_mm;
int ret; int ret;
if (!suspend_pgd) if (!idmap_pgd)
return -EINVAL; return -EINVAL;
/* /*
...@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret; return ret;
} }
static int __init cpu_suspend_init(void)
{
suspend_pgd = pgd_alloc(&init_mm);
if (suspend_pgd) {
unsigned long addr = virt_to_phys(cpu_resume_mmu);
identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
}
return suspend_pgd ? 0 : -ENOMEM;
}
core_initcall(cpu_suspend_init);
...@@ -13,6 +13,12 @@ ...@@ -13,6 +13,12 @@
*(.proc.info.init) \ *(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .; VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .;
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x) #define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x #define ARM_CPU_KEEP(x) x
...@@ -92,6 +98,7 @@ SECTIONS ...@@ -92,6 +98,7 @@ SECTIONS
SCHED_TEXT SCHED_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IDMAP_TEXT
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
*(.fixup) *(.fixup)
#endif #endif
......
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/sections.h>
pgd_t *idmap_pgd;
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot) unsigned long prot)
...@@ -28,7 +32,7 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, ...@@ -28,7 +32,7 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end); } while (pud++, addr = next, addr != end);
} }
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{ {
unsigned long prot, next; unsigned long prot, next;
...@@ -43,48 +47,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) ...@@ -43,48 +47,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (pgd++, addr = next, addr != end); } while (pgd++, addr = next, addr != end);
} }
#ifdef CONFIG_SMP extern char __idmap_text_start[], __idmap_text_end[];
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
pmd_t *pmd = pmd_offset(pud, addr);
pmd_clear(pmd);
}
static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) static int __init init_static_idmap(void)
{ {
pud_t *pud = pud_offset(pgd, addr); phys_addr_t idmap_start, idmap_end;
unsigned long next;
do { idmap_pgd = pgd_alloc(&init_mm);
next = pud_addr_end(addr, end); if (!idmap_pgd)
idmap_del_pmd(pud, addr, next); return -ENOMEM;
} while (pud++, addr = next, addr != end);
}
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) /* Add an identity mapping for the physical address of the section. */
{ idmap_start = virt_to_phys((void *)__idmap_text_start);
unsigned long next; idmap_end = virt_to_phys((void *)__idmap_text_end);
pgd += pgd_index(addr); pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
do { (long long)idmap_start, (long long)idmap_end);
next = pgd_addr_end(addr, end); identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
idmap_del_pud(pgd, addr, next);
} while (pgd++, addr = next, addr != end); return 0;
} }
#endif early_initcall(init_static_idmap);
/* /*
* In order to soft-boot, we need to insert a 1:1 mapping in place of * In order to soft-boot, we need to switch to a 1:1 mapping for the
* the user-mode pages. This will then ensure that we have predictable * cpu_reset functions. This will then ensure that we have predictable
* results when turning the mmu off * results when turning off the mmu.
*/ */
void setup_mm_for_reboot(void) void setup_mm_for_reboot(void)
{ {
/* /* Clean and invalidate L1. */
* We need to access to user-mode page tables here. For kernel threads flush_cache_all();
* we don't have any user-mode mappings so we use the context that we
* "borrowed". /* Switch to the identity mapping. */
*/ cpu_switch_mm(idmap_pgd, &init_mm);
identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
/* Flush the TLB. */
local_flush_tlb_all(); local_flush_tlb_all();
} }
...@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin) ...@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020_reset) ENTRY(cpu_arm1020_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset) ...@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm1020_reset)
.popsection
/* /*
* cpu_arm1020_do_idle() * cpu_arm1020_do_idle()
......
...@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin) ...@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset) ENTRY(cpu_arm1020e_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset) ...@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm1020e_reset)
.popsection
/* /*
* cpu_arm1020e_do_idle() * cpu_arm1020e_do_idle()
......
...@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin) ...@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1022_reset) ENTRY(cpu_arm1022_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset) ...@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm1022_reset)
.popsection
/* /*
* cpu_arm1022_do_idle() * cpu_arm1022_do_idle()
......
...@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin) ...@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1026_reset) ENTRY(cpu_arm1026_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset) ...@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm1026_reset)
.popsection
/* /*
* cpu_arm1026_do_idle() * cpu_arm1026_do_idle()
......
...@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext) ...@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
* Params : r0 = address to jump to * Params : r0 = address to jump to
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm6_reset) ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset) ENTRY(cpu_arm7_reset)
mov r1, #0 mov r1, #0
...@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset) ...@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
mov r1, #0x30 mov r1, #0x30
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm6_reset)
ENDPROC(cpu_arm7_reset)
.popsection
__CPUINIT __CPUINIT
......
...@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext) ...@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Params : r0 = address to jump to * Params : r0 = address to jump to
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm720_reset) ENTRY(cpu_arm720_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
...@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset) ...@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........ bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm720_reset)
.popsection
__CPUINIT __CPUINIT
......
...@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin) ...@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
* Params : r0 = address to jump to * Params : r0 = address to jump to
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset) ENTRY(cpu_arm740_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
...@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset) ...@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
bic ip, ip, #0x0000000c @ ............wc.. bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm740_reset)
.popsection
__CPUINIT __CPUINIT
......
...@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin) ...@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Params : loc(r0) address to jump to * Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset. * Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm7tdmi_reset) ENTRY(cpu_arm7tdmi_reset)
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm7tdmi_reset)
.popsection
__CPUINIT __CPUINIT
......
...@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin) ...@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset) ENTRY(cpu_arm920_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset) ...@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm920_reset)
.popsection
/* /*
* cpu_arm920_do_idle() * cpu_arm920_do_idle()
......
...@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin) ...@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm922_reset) ENTRY(cpu_arm922_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset) ...@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm922_reset)
.popsection
/* /*
* cpu_arm922_do_idle() * cpu_arm922_do_idle()
......
...@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin) ...@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm925_reset) ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */ /* Send software reset to MPU and DSP */
mov ip, #0xff000000 mov ip, #0xff000000
...@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset) ...@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
orr ip, ip, #0x0000ce00 orr ip, ip, #0x0000ce00
mov r4, #1 mov r4, #1
strh r4, [ip, #0x10] strh r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset)
.popsection
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
......
...@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin) ...@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm926_reset) ENTRY(cpu_arm926_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset) ...@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm926_reset)
.popsection
/* /*
* cpu_arm926_do_idle() * cpu_arm926_do_idle()
......
...@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin) ...@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
* Params : r0 = address to jump to * Params : r0 = address to jump to
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm940_reset) ENTRY(cpu_arm940_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c5, 0 @ flush I cache
...@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset) ...@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm940_reset)
.popsection
/* /*
* cpu_arm940_do_idle() * cpu_arm940_do_idle()
......
...@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin) ...@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
* Params : r0 = address to jump to * Params : r0 = address to jump to
* Notes : This sets up everything for a reset * Notes : This sets up everything for a reset
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm946_reset) ENTRY(cpu_arm946_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache mcr p15, 0, ip, c7, c5, 0 @ flush I cache
...@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset) ...@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm946_reset)
.popsection
/* /*
* cpu_arm946_do_idle() * cpu_arm946_do_idle()
......
...@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin) ...@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Params : loc(r0) address to jump to * Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset. * Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/ */
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm9tdmi_reset) ENTRY(cpu_arm9tdmi_reset)
mov pc, r0 mov pc, r0
ENDPROC(cpu_arm9tdmi_reset)
.popsection
__CPUINIT __CPUINIT
......
...@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin) ...@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 4 .align 4
.pushsection .idmap.text, "ax"
ENTRY(cpu_fa526_reset) ENTRY(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */ /* TODO: Use CP8 if possible... */
mov ip, #0 mov ip, #0
...@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset) ...@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
nop nop
nop nop
mov pc, r0 mov pc, r0
ENDPROC(cpu_fa526_reset)
.popsection
/* /*
* cpu_fa526_do_idle() * cpu_fa526_do_idle()
......
...@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin) ...@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset) ENTRY(cpu_feroceon_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset) ...@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_feroceon_reset)
.popsection
/* /*
* cpu_feroceon_do_idle() * cpu_feroceon_do_idle()
......
...@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin) ...@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
* (same as arm926) * (same as arm926)
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_mohawk_reset) ENTRY(cpu_mohawk_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset) ...@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_mohawk_reset)
.popsection
/* /*
* cpu_mohawk_do_idle() * cpu_mohawk_do_idle()
......
...@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin) ...@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa110_reset) ENTRY(cpu_sa110_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset) ...@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_sa110_reset)
.popsection
/* /*
* cpu_sa110_do_idle(type) * cpu_sa110_do_idle(type)
......
...@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin) ...@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa1100_reset) ENTRY(cpu_sa1100_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
...@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset) ...@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
ENDPROC(cpu_sa1100_reset)
.popsection
/* /*
* cpu_sa1100_do_idle(type) * cpu_sa1100_do_idle(type)
......
...@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin) ...@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
* - loc - location to jump to for soft reset * - loc - location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v6_reset) ENTRY(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m bic r1, r1, #0x1 @ ...............m
...@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset) ...@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
mov r1, #0 mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB mcr p15, 0, r1, c7, c5, 4 @ ISB
mov pc, r0 mov pc, r0
ENDPROC(cpu_v6_reset)
.popsection
/* /*
* cpu_v6_do_idle() * cpu_v6_do_idle()
......
...@@ -63,6 +63,7 @@ ENDPROC(cpu_v7_proc_fin) ...@@ -63,6 +63,7 @@ ENDPROC(cpu_v7_proc_fin)
* caches disabled. * caches disabled.
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset) ENTRY(cpu_v7_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m bic r1, r1, #0x1 @ ...............m
...@@ -71,6 +72,7 @@ ENTRY(cpu_v7_reset) ...@@ -71,6 +72,7 @@ ENTRY(cpu_v7_reset)
isb isb
mov pc, r0 mov pc, r0
ENDPROC(cpu_v7_reset) ENDPROC(cpu_v7_reset)
.popsection
/* /*
* cpu_v7_do_idle() * cpu_v7_do_idle()
......
...@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin) ...@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
* loc: location to jump to for soft reset * loc: location to jump to for soft reset
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xsc3_reset) ENTRY(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR msr cpsr_c, r1 @ reset CPSR
...@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset) ...@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive. @ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
mov pc, r0 mov pc, r0
ENDPROC(cpu_xsc3_reset)
.popsection
/* /*
* cpu_xsc3_do_idle() * cpu_xsc3_do_idle()
......
...@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin) ...@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
* Beware PXA270 erratum E7. * Beware PXA270 erratum E7.
*/ */
.align 5 .align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xscale_reset) ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR msr cpsr_c, r1 @ reset CPSR
...@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset) ...@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive. @ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, r0 mov pc, r0
ENDPROC(cpu_xscale_reset)
.popsection
/* /*
* cpu_xscale_do_idle() * cpu_xscale_do_idle()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment