Commit 4a9cb360 authored by Russell King

ARM: fixup SMP alternatives in modules

With certain configurations, we inline the unlock functions in modules,
which results in SMP alternatives being created in modules.  We need to
fix those up when loading a module to prevent undefined instruction
faults.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0193c00e
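
For orientation, here is a minimal C sketch of the mechanism this commit relies on; the type and function names (smp_alt_entry, apply_smp_alternatives) are illustrative, not kernel identifiers. Each ALT_SMP()/ALT_UP() pair emits the SMP instruction into the code and records in the .alt.smp.init section the instruction's address followed by the uniprocessor replacement word. Fixing up therefore means walking those records and overwriting each location, which is what __do_fixup_smp_on_up does in assembly in the diff below; the offset parameter corresponds to register r3 there, non-zero only for the early boot-time pass over the kernel image and zero when patching modules.

#include <stdint.h>
#include <stddef.h>

/* Illustrative only -- not the kernel's own types or names. */
struct smp_alt_entry {
	uint32_t *insn;		/* address of the ALT_SMP() instruction */
	uint32_t  up_insn;	/* ALT_UP() replacement word */
};

/* C rendering of the __do_fixup_smp_on_up loop. */
void apply_smp_alternatives(struct smp_alt_entry *start,
			    struct smp_alt_entry *end,
			    ptrdiff_t offset)
{
	struct smp_alt_entry *e;

	for (e = start; e < end; e++)
		*(uint32_t *)((char *)e->insn + offset) = e->up_insn;
}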
arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
 	and	r3, r9, #0x000f0000	@ architecture version
 	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -440,7 +430,31 @@ smp_on_up:
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
+#endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			       maps[i].txt_sec->sh_addr,
 			       maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 
 	return 0;
 }
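
On the module.c side, find_mod_section() locates the module's .alt.smp.init section by name, and the rewrite only happens when is_smp() reports a uniprocessor system; on SMP hardware the ALT_SMP() instructions must be left exactly as emitted, so the section is simply ignored there.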