Commit 172dcd93 authored by Paul Burton

MIPS: Always allocate exception vector for MIPSr2+

Currently we allocate the exception vector on systems which use a
vectored interrupt mode, but otherwise attempt to reuse whatever
exception vector the bootloader uses.

This can be problematic for a number of reasons:

  1) The memory isn't properly marked reserved in the memblock
     allocator. We've relied on the fact that EBase is generally in the
     memory below the kernel image which we don't free, but this is
     about to change.

  2) Recent versions of U-Boot place their exception vector high in
     kseg0, in memory which isn't protected by being lower than the
     kernel anyway & can end up being clobbered.

  3) We are unnecessarily reliant upon there being memory at the address
     EBase points to upon entry to the kernel. This is often the case,
     but if the bootloader doesn't configure EBase & leaves it with its
     default value then we rely upon there being memory at physical
     address 0 for no good reason.

Improve this situation by allocating the exception vector in all cases
when running on MIPSr2 or higher, and reserving the memory for MIPSr1 or
lower. This ensures we don't clobber the exception vector in any
configuration, and for MIPSr2 & higher removes the need for memory at
physical address 0.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
Tested-by: Serge Semin <fancer.lancer@gmail.com>
Cc: linux-mips@vger.kernel.org
parent f995adb0
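
For context on what the allocated memory is used for: on MIPSr2 and higher the CPU locates its exception handlers through the CP0 EBase register, which is why the diff below can stop depending on whatever vector the bootloader left behind. Once trap_init() has chosen ebase, the kernel programs EBase with it (that step is outside these hunks). A minimal sketch of such a write, assuming a MIPSr2+ CPU with Status.BEV already clear; the helper name is illustrative, not the kernel's actual code:

#include <asm/mipsregs.h>	/* write_c0_ebase() */
#include <asm/hazards.h>	/* back_to_back_c0_hazard() */

/*
 * Illustrative sketch (not the kernel's actual helper): point a
 * MIPSr2+ CPU at a newly allocated exception vector. EBase holds
 * the vector base in its upper bits, so `base' must be at least
 * 4KB-aligned; with Status.BEV clear the CPU then fetches its
 * exception vectors relative to this base.
 */
static void program_exception_base(unsigned long base)
{
	write_c0_ebase(base);
	back_to_back_c0_hazard();	/* let the CP0 write take effect */
}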
arch/mips/kernel/traps.c

@@ -2284,18 +2284,27 @@ void __init trap_init(void)
 	extern char except_vec3_generic;
 	extern char except_vec4;
 	extern char except_vec3_r4000;
-	unsigned long i;
+	unsigned long i, vec_size;
+	phys_addr_t ebase_pa;
 
 	check_wait();
 
-	if (cpu_has_veic || cpu_has_vint) {
-		unsigned long size = 0x200 + VECTORSPACING*64;
-		phys_addr_t ebase_pa;
+	if (!cpu_has_mips_r2_r6) {
+		ebase = CAC_BASE;
+		ebase_pa = virt_to_phys((void *)ebase);
+		vec_size = 0x400;
+
+		memblock_reserve(ebase_pa, vec_size);
+	} else {
+		if (cpu_has_veic || cpu_has_vint)
+			vec_size = 0x200 + VECTORSPACING*64;
+		else
+			vec_size = PAGE_SIZE;
 
-		ebase_pa = memblock_phys_alloc(size, 1 << fls(size));
+		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
 		if (!ebase_pa)
 			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
-			      __func__, size, 1 << fls(size));
+			      __func__, vec_size, 1 << fls(vec_size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2312,20 +2321,6 @@ void __init trap_init(void)
 			ebase = CKSEG0ADDR(ebase_pa);
 		else
 			ebase = (unsigned long)phys_to_virt(ebase_pa);
-	} else {
-		ebase = CAC_BASE;
-
-		if (cpu_has_mips_r2_r6) {
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				ebase = (read_c0_ebase_64() & ~0xfff);
-#else
-				ebase = (read_c0_ebase() & ~0xfff);
-#endif
-			} else {
-				ebase += (read_c0_ebase() & 0x3ffff000);
-			}
-		}
 	}
 
 	if (cpu_has_mmips) {
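
A side note on the alignment argument above (commentary, not part of the commit): the kernel's fls(x) returns the 1-based index of the most significant set bit, so 1 << fls(vec_size) is the smallest power of two strictly greater than vec_size. Aligning the allocation that way keeps the entire vector region inside a single naturally aligned block, which more than covers EBase's 4KB alignment requirement. A small userspace sketch, using a stand-in fls() and assuming VECTORSPACING is 0x100 as defined in arch/mips/kernel/traps.c:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): returns the 1-based
 * index of the most significant set bit, or 0 if x is 0. */
static int fls(unsigned long x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* The two sizes the allocation branch can pass to
	 * memblock_phys_alloc(): PAGE_SIZE (assuming 4KB pages) and
	 * the vectored-interrupt case 0x200 + VECTORSPACING*64. */
	unsigned long sizes[] = { 0x1000, 0x200 + 0x100 * 64 };

	for (int i = 0; i < 2; i++)
		printf("vec_size=%#lx -> align=%#lx\n",
		       sizes[i], 1UL << fls(sizes[i]));

	/* Prints:
	 *   vec_size=0x1000 -> align=0x2000
	 *   vec_size=0x4200 -> align=0x8000
	 */
	return 0;
}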