Commit d6edf951 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - amba bus updates

 - simplify ldr_this_cpu assembler macro for uniprocessor builds

 - avoid explicit assembler literal loads

 - more spectre-bhb improvements

 - add Cortex-A9 Errata 764319 workaround

 - add all unwind tables for modules

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9204/2: module: Add all unwind tables when load module
  ARM: 9206/1: A9: Add ARM ERRATA 764319 workaround (Updated)
  ARM: 9201/1: spectre-bhb: rely on linker to emit cross-section literal loads
  ARM: 9200/1: spectre-bhb: avoid cross-subsection jump using a numbered label
  ARM: 9199/1: spectre-bhb: use local DSB and elide ISB in loop8 sequence
  ARM: 9198/1: spectre-bhb: simplify BPIALL vector macro
  ARM: 9195/1: entry: avoid explicit literal loads
  ARM: 9194/1: assembler: simplify ldr_this_cpu for !SMP builds
  ARM: 9192/1: amba: fix memory leak in amba_device_try_add()
  ARM: 9193/1: amba: Add amba_read_periphid() helper
parents 95fbef17 b6f21d14
...@@ -972,6 +972,17 @@ config ARM_ERRATA_764369 ...@@ -972,6 +972,17 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU. in the diagnostic control register of the SCU.
config ARM_ERRATA_764319
bool "ARM errata: Read to DBGPRSR and DBGOSLSR may generate Undefined instruction"
depends on CPU_V7
help
This option enables the workaround for the 764319 Cortex-A9 erratum.
CP14 read accesses to the DBGPRSR and DBGOSLSR registers generate an
unexpected Undefined Instruction exception when the DBGSWENABLE
external pin is set to 0, even when the CP14 accesses are performed
from a privileged mode. This workaround catches the exception in a
way that the kernel does not stop execution.
config ARM_ERRATA_775420 config ARM_ERRATA_775420
bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
depends on CPU_V7 depends on CPU_V7
......
...@@ -666,12 +666,11 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -666,12 +666,11 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond __adldst_l str, \src, \sym, \tmp, \cond
.endm .endm
.macro __ldst_va, op, reg, tmp, sym, cond .macro __ldst_va, op, reg, tmp, sym, cond, offset
#if __LINUX_ARM_ARCH__ >= 7 || \ #if __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
mov_l \tmp, \sym, \cond mov_l \tmp, \sym, \cond
\op\cond \reg, [\tmp]
#else #else
/* /*
* Avoid a literal load, by emitting a sequence of ADD/LDR instructions * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
...@@ -683,24 +682,29 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -683,24 +682,29 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
.reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
.reloc .L2_\@, R_ARM_LDR_PC_G2, \sym .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond \tmp, pc, #8 .L0_\@: sub\cond \tmp, pc, #8 - \offset
.L1_\@: sub\cond \tmp, \tmp, #4 .L1_\@: sub\cond \tmp, \tmp, #4 - \offset
.L2_\@: \op\cond \reg, [\tmp, #0] .L2_\@:
#endif #endif
\op\cond \reg, [\tmp, #\offset]
.endm .endm
/* /*
* ldr_va - load a 32-bit word from the virtual address of \sym * ldr_va - load a 32-bit word from the virtual address of \sym
*/ */
.macro ldr_va, rd:req, sym:req, cond .macro ldr_va, rd:req, sym:req, cond, tmp, offset=0
__ldst_va ldr, \rd, \rd, \sym, \cond .ifnb \tmp
__ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
.else
__ldst_va ldr, \rd, \rd, \sym, \cond, \offset
.endif
.endm .endm
/* /*
* str_va - store a 32-bit word to the virtual address of \sym * str_va - store a 32-bit word to the virtual address of \sym
*/ */
.macro str_va, rn:req, sym:req, tmp:req, cond .macro str_va, rn:req, sym:req, tmp:req, cond
__ldst_va str, \rn, \tmp, \sym, \cond __ldst_va str, \rn, \tmp, \sym, \cond, 0
.endm .endm
/* /*
...@@ -727,7 +731,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -727,7 +731,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
* are permitted to overlap with 'rd' if != sp * are permitted to overlap with 'rd' if != sp
*/ */
.macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#if __LINUX_ARM_ARCH__ >= 7 || \ #ifndef CONFIG_SMP
ldr_va \rd, \sym, tmp=\t1
#elif __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
this_cpu_offset \t1 this_cpu_offset \t1
......
...@@ -3,20 +3,10 @@ ...@@ -3,20 +3,10 @@
#define _ASM_ARM_MODULE_H #define _ASM_ARM_MODULE_H
#include <asm-generic/module.h> #include <asm-generic/module.h>
#include <asm/unwind.h>
struct unwind_table;
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
enum { #define ELF_SECTION_UNWIND 0x70000001
ARM_SEC_INIT,
ARM_SEC_DEVINIT,
ARM_SEC_CORE,
ARM_SEC_EXIT,
ARM_SEC_DEVEXIT,
ARM_SEC_HOT,
ARM_SEC_UNLIKELY,
ARM_SEC_MAX,
};
#endif #endif
#define PLT_ENT_STRIDE L1_CACHE_BYTES #define PLT_ENT_STRIDE L1_CACHE_BYTES
...@@ -36,7 +26,8 @@ struct mod_plt_sec { ...@@ -36,7 +26,8 @@ struct mod_plt_sec {
struct mod_arch_specific { struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
struct unwind_table *unwind[ARM_SEC_MAX]; struct list_head unwind_list;
struct unwind_table *init_table;
#endif #endif
#ifdef CONFIG_ARM_MODULE_PLTS #ifdef CONFIG_ARM_MODULE_PLTS
struct mod_plt_sec core; struct mod_plt_sec core;
......
...@@ -24,6 +24,7 @@ struct unwind_idx { ...@@ -24,6 +24,7 @@ struct unwind_idx {
struct unwind_table { struct unwind_table {
struct list_head list; struct list_head list;
struct list_head mod_list;
const struct unwind_idx *start; const struct unwind_idx *start;
const struct unwind_idx *origin; const struct unwind_idx *origin;
const struct unwind_idx *stop; const struct unwind_idx *stop;
......
...@@ -61,9 +61,8 @@ ...@@ -61,9 +61,8 @@
.macro pabt_helper .macro pabt_helper
@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT #ifdef MULTI_PABORT
ldr ip, .LCprocfns ldr_va ip, processor, offset=PROCESSOR_PABT_FUNC
mov lr, pc bl_r ip
ldr pc, [ip, #PROCESSOR_PABT_FUNC]
#else #else
bl CPU_PABORT_HANDLER bl CPU_PABORT_HANDLER
#endif #endif
...@@ -82,9 +81,8 @@ ...@@ -82,9 +81,8 @@
@ the fault status register in r1. r9 must be preserved. @ the fault status register in r1. r9 must be preserved.
@ @
#ifdef MULTI_DABORT #ifdef MULTI_DABORT
ldr ip, .LCprocfns ldr_va ip, processor, offset=PROCESSOR_DABT_FUNC
mov lr, pc bl_r ip
ldr pc, [ip, #PROCESSOR_DABT_FUNC]
#else #else
bl CPU_DABORT_HANDLER bl CPU_DABORT_HANDLER
#endif #endif
...@@ -302,16 +300,6 @@ __fiq_svc: ...@@ -302,16 +300,6 @@ __fiq_svc:
UNWIND(.fnend ) UNWIND(.fnend )
ENDPROC(__fiq_svc) ENDPROC(__fiq_svc)
.align 5
.LCcralign:
.word cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
.word processor
#endif
.LCfp:
.word fp_enter
/* /*
* Abort mode handlers * Abort mode handlers
*/ */
...@@ -370,7 +358,7 @@ ENDPROC(__fiq_abt) ...@@ -370,7 +358,7 @@ ENDPROC(__fiq_abt)
THUMB( stmia sp, {r0 - r12} ) THUMB( stmia sp, {r0 - r12} )
ATRAP( mrc p15, 0, r7, c1, c0, 0) ATRAP( mrc p15, 0, r7, c1, c0, 0)
ATRAP( ldr r8, .LCcralign) ATRAP( ldr_va r8, cr_alignment)
ldmia r0, {r3 - r5} ldmia r0, {r3 - r5}
add r0, sp, #S_PC @ here for interlock avoidance add r0, sp, #S_PC @ here for interlock avoidance
...@@ -379,8 +367,6 @@ ENDPROC(__fiq_abt) ...@@ -379,8 +367,6 @@ ENDPROC(__fiq_abt)
str r3, [sp] @ save the "real" r0 copied str r3, [sp] @ save the "real" r0 copied
@ from the exception stack @ from the exception stack
ATRAP( ldr r8, [r8, #0])
@ @
@ We are now ready to fill in the remaining blanks on the stack: @ We are now ready to fill in the remaining blanks on the stack:
@ @
...@@ -505,9 +491,7 @@ __und_usr_thumb: ...@@ -505,9 +491,7 @@ __und_usr_thumb:
*/ */
#if __LINUX_ARM_ARCH__ < 7 #if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */ /* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE ldr_va r5, cpu_architecture
ldr r5, .LCcpu_architecture
ldr r5, [r5]
cmp r5, #CPU_ARCH_ARMv7 cmp r5, #CPU_ARCH_ARMv7
blo __und_usr_fault_16 @ 16bit undefined instruction blo __und_usr_fault_16 @ 16bit undefined instruction
/* /*
...@@ -654,12 +638,6 @@ call_fpe: ...@@ -654,12 +638,6 @@ call_fpe:
ret.w lr @ CP#14 (Debug) ret.w lr @ CP#14 (Debug)
ret.w lr @ CP#15 (Control) ret.w lr @ CP#15 (Control)
#ifdef NEED_CPU_ARCHITECTURE
.align 2
.LCcpu_architecture:
.word __cpu_architecture
#endif
#ifdef CONFIG_NEON #ifdef CONFIG_NEON
.align 6 .align 6
...@@ -685,9 +663,8 @@ call_fpe: ...@@ -685,9 +663,8 @@ call_fpe:
#endif #endif
do_fpe: do_fpe:
ldr r4, .LCfp
add r10, r10, #TI_FPSTATE @ r10 = workspace add r10, r10, #TI_FPSTATE @ r10 = workspace
ldr pc, [r4] @ Call FP module USR entry point ldr_va pc, fp_enter, tmp=r4 @ Call FP module USR entry point
/* /*
* The FP module is called with these registers set: * The FP module is called with these registers set:
...@@ -1101,6 +1078,12 @@ __kuser_helper_end: ...@@ -1101,6 +1078,12 @@ __kuser_helper_end:
*/ */
.macro vector_stub, name, mode, correction=0 .macro vector_stub, name, mode, correction=0
.align 5 .align 5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
mcr p15, 0, r0, c7, c5, 6 @ BPIALL
@ isb not needed due to "movs pc, lr" in the vector stub
@ which gives a "context synchronisation".
#endif
vector_\name: vector_\name:
.if \correction .if \correction
...@@ -1111,7 +1094,8 @@ vector_\name: ...@@ -1111,7 +1094,8 @@ vector_\name:
stmia sp, {r0, lr} @ save r0, lr stmia sp, {r0, lr} @ save r0, lr
@ Save spsr_<exception> (parent CPSR) @ Save spsr_<exception> (parent CPSR)
2: mrs lr, spsr .Lvec_\name:
mrs lr, spsr
str lr, [sp, #8] @ save spsr str lr, [sp, #8] @ save spsr
@ @
...@@ -1148,25 +1132,11 @@ vector_bhb_loop8_\name: ...@@ -1148,25 +1132,11 @@ vector_bhb_loop8_\name:
3: W(b) . + 4 3: W(b) . + 4
subs r0, r0, #1 subs r0, r0, #1
bne 3b bne 3b
dsb dsb nsh
isb
b 2b
ENDPROC(vector_bhb_loop8_\name)
vector_bhb_bpiall_\name:
.if \correction
sub lr, lr, #\correction
.endif
@ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr}
@ bhb workaround
mcr p15, 0, r0, c7, c5, 6 @ BPIALL
@ isb not needed due to "movs pc, lr" in the vector stub @ isb not needed due to "movs pc, lr" in the vector stub
@ which gives a "context synchronisation". @ which gives a "context synchronisation".
b 2b b .Lvec_\name
ENDPROC(vector_bhb_bpiall_\name) ENDPROC(vector_bhb_loop8_\name)
.previous .previous
#endif #endif
...@@ -1176,10 +1146,15 @@ ENDPROC(vector_bhb_bpiall_\name) ...@@ -1176,10 +1146,15 @@ ENDPROC(vector_bhb_bpiall_\name)
.endm .endm
.section .stubs, "ax", %progbits .section .stubs, "ax", %progbits
@ This must be the first word @ These need to remain at the start of the section so that
@ they are in range of the 'SWI' entries in the vector tables
@ located 4k down.
.L__vector_swi:
.word vector_swi .word vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY #ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
.word vector_bhb_loop8_swi .word vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
.word vector_bhb_bpiall_swi .word vector_bhb_bpiall_swi
#endif #endif
...@@ -1322,10 +1297,11 @@ vector_addrexcptn: ...@@ -1322,10 +1297,11 @@ vector_addrexcptn:
.globl vector_fiq .globl vector_fiq
.section .vectors, "ax", %progbits .section .vectors, "ax", %progbits
.L__vectors_start:
W(b) vector_rst W(b) vector_rst
W(b) vector_und W(b) vector_und
W(ldr) pc, .L__vectors_start + 0x1000 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi )
THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_swi )
W(ldr) pc, .
W(b) vector_pabt W(b) vector_pabt
W(b) vector_dabt W(b) vector_dabt
W(b) vector_addrexcptn W(b) vector_addrexcptn
...@@ -1334,10 +1310,11 @@ vector_addrexcptn: ...@@ -1334,10 +1310,11 @@ vector_addrexcptn:
#ifdef CONFIG_HARDEN_BRANCH_HISTORY #ifdef CONFIG_HARDEN_BRANCH_HISTORY
.section .vectors.bhb.loop8, "ax", %progbits .section .vectors.bhb.loop8, "ax", %progbits
.L__vectors_bhb_loop8_start:
W(b) vector_rst W(b) vector_rst
W(b) vector_bhb_loop8_und W(b) vector_bhb_loop8_und
W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi )
THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi )
W(ldr) pc, .
W(b) vector_bhb_loop8_pabt W(b) vector_bhb_loop8_pabt
W(b) vector_bhb_loop8_dabt W(b) vector_bhb_loop8_dabt
W(b) vector_addrexcptn W(b) vector_addrexcptn
...@@ -1345,10 +1322,11 @@ vector_addrexcptn: ...@@ -1345,10 +1322,11 @@ vector_addrexcptn:
W(b) vector_bhb_loop8_fiq W(b) vector_bhb_loop8_fiq
.section .vectors.bhb.bpiall, "ax", %progbits .section .vectors.bhb.bpiall, "ax", %progbits
.L__vectors_bhb_bpiall_start:
W(b) vector_rst W(b) vector_rst
W(b) vector_bhb_bpiall_und W(b) vector_bhb_bpiall_und
W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008 ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )
THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi )
W(ldr) pc, .
W(b) vector_bhb_bpiall_pabt W(b) vector_bhb_bpiall_pabt
W(b) vector_bhb_bpiall_dabt W(b) vector_bhb_bpiall_dabt
W(b) vector_addrexcptn W(b) vector_addrexcptn
......
...@@ -164,7 +164,7 @@ ENTRY(vector_bhb_loop8_swi) ...@@ -164,7 +164,7 @@ ENTRY(vector_bhb_loop8_swi)
1: b 2f 1: b 2f
2: subs r8, r8, #1 2: subs r8, r8, #1
bne 1b bne 1b
dsb dsb nsh
isb isb
b 3f b 3f
ENDPROC(vector_bhb_loop8_swi) ENDPROC(vector_bhb_loop8_swi)
...@@ -198,7 +198,7 @@ ENTRY(vector_swi) ...@@ -198,7 +198,7 @@ ENTRY(vector_swi)
#endif #endif
reload_current r10, ip reload_current r10, ip
zero_fp zero_fp
alignment_trap r10, ip, __cr_alignment alignment_trap r10, ip, cr_alignment
asm_trace_hardirqs_on save=0 asm_trace_hardirqs_on save=0
enable_irq_notrace enable_irq_notrace
ct_user_exit save=0 ct_user_exit save=0
...@@ -328,14 +328,6 @@ __sys_trace_return: ...@@ -328,14 +328,6 @@ __sys_trace_return:
bl syscall_trace_exit bl syscall_trace_exit
b ret_slow_syscall b ret_slow_syscall
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
__cr_alignment:
.word cr_alignment
#endif
.ltorg
.macro syscall_table_start, sym .macro syscall_table_start, sym
.equ __sys_nr, 0 .equ __sys_nr, 0
.type \sym, #object .type \sym, #object
......
...@@ -48,8 +48,7 @@ ...@@ -48,8 +48,7 @@
.macro alignment_trap, rtmp1, rtmp2, label .macro alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP #ifdef CONFIG_ALIGNMENT_TRAP
mrc p15, 0, \rtmp2, c1, c0, 0 mrc p15, 0, \rtmp2, c1, c0, 0
ldr \rtmp1, \label ldr_va \rtmp1, \label
ldr \rtmp1, [\rtmp1]
teq \rtmp1, \rtmp2 teq \rtmp1, \rtmp2
mcrne p15, 0, \rtmp1, c1, c0, 0 mcrne p15, 0, \rtmp1, c1, c0, 0
#endif #endif
......
...@@ -941,6 +941,23 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, ...@@ -941,6 +941,23 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
return ret; return ret;
} }
#ifdef CONFIG_ARM_ERRATA_764319
static int oslsr_fault;
/*
 * Undef-instruction hook for ARM erratum 764319: invoked when the CP14
 * read of DBGOSLSR raises an unexpected Undefined Instruction exception
 * (see the CONFIG_ARM_ERRATA_764319 help text). Records the fault and
 * resumes execution past the faulting instruction instead of letting the
 * kernel stop.
 */
static int debug_oslsr_trap(struct pt_regs *regs, unsigned int instr)
{
/* seen by the caller that registered the hook around the OSLSR read */
oslsr_fault = 1;
/* skip the 4-byte instruction that trapped so execution continues */
instruction_pointer(regs) += 4;
return 0;
}
/*
 * Exact-match undef hook (mask 0xffffffff) for the single instruction
 * encoding 0xee115e91 — presumably the CP14 MRC that reads DBGOSLSR,
 * per the erratum 764319 description; verify against the ARM ARM
 * encoding tables. Dispatches to debug_oslsr_trap above.
 */
static struct undef_hook debug_oslsr_hook = {
.instr_mask = 0xffffffff,
.instr_val = 0xee115e91,
.fn = debug_oslsr_trap,
};
#endif
/* /*
* One-time initialisation. * One-time initialisation.
*/ */
...@@ -974,7 +991,16 @@ static bool core_has_os_save_restore(void) ...@@ -974,7 +991,16 @@ static bool core_has_os_save_restore(void)
case ARM_DEBUG_ARCH_V7_1: case ARM_DEBUG_ARCH_V7_1:
return true; return true;
case ARM_DEBUG_ARCH_V7_ECP14: case ARM_DEBUG_ARCH_V7_ECP14:
#ifdef CONFIG_ARM_ERRATA_764319
oslsr_fault = 0;
register_undef_hook(&debug_oslsr_hook);
ARM_DBG_READ(c1, c1, 4, oslsr);
unregister_undef_hook(&debug_oslsr_hook);
if (oslsr_fault)
return false;
#else
ARM_DBG_READ(c1, c1, 4, oslsr); ARM_DBG_READ(c1, c1, 4, oslsr);
#endif
if (oslsr & ARM_OSLSR_OSLM0) if (oslsr & ARM_OSLSR_OSLM0)
return true; return true;
fallthrough; fallthrough;
......
...@@ -459,46 +459,40 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, ...@@ -459,46 +459,40 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX]; struct list_head *unwind_list = &mod->arch.unwind_list;
int i;
memset(maps, 0, sizeof(maps)); INIT_LIST_HEAD(unwind_list);
mod->arch.init_table = NULL;
for (s = sechdrs; s < sechdrs_end; s++) { for (s = sechdrs; s < sechdrs_end; s++) {
const char *secname = secstrs + s->sh_name; const char *secname = secstrs + s->sh_name;
const char *txtname;
const Elf_Shdr *txt_sec;
if (!(s->sh_flags & SHF_ALLOC)) if (!(s->sh_flags & SHF_ALLOC) ||
s->sh_type != ELF_SECTION_UNWIND)
continue; continue;
if (!strcmp(".ARM.exidx", secname))
txtname = ".text";
else
txtname = secname + strlen(".ARM.exidx");
txt_sec = find_mod_section(hdr, sechdrs, txtname);
if (txt_sec) {
struct unwind_table *table =
unwind_table_add(s->sh_addr,
s->sh_size,
txt_sec->sh_addr,
txt_sec->sh_size);
list_add(&table->mod_list, unwind_list);
/* save init table for module_arch_freeing_init */
if (strcmp(".ARM.exidx.init.text", secname) == 0) if (strcmp(".ARM.exidx.init.text", secname) == 0)
maps[ARM_SEC_INIT].unw_sec = s; mod->arch.init_table = table;
else if (strcmp(".ARM.exidx", secname) == 0) }
maps[ARM_SEC_CORE].unw_sec = s;
else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
maps[ARM_SEC_EXIT].unw_sec = s;
else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
maps[ARM_SEC_UNLIKELY].unw_sec = s;
else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
maps[ARM_SEC_HOT].unw_sec = s;
else if (strcmp(".init.text", secname) == 0)
maps[ARM_SEC_INIT].txt_sec = s;
else if (strcmp(".text", secname) == 0)
maps[ARM_SEC_CORE].txt_sec = s;
else if (strcmp(".exit.text", secname) == 0)
maps[ARM_SEC_EXIT].txt_sec = s;
else if (strcmp(".text.unlikely", secname) == 0)
maps[ARM_SEC_UNLIKELY].txt_sec = s;
else if (strcmp(".text.hot", secname) == 0)
maps[ARM_SEC_HOT].txt_sec = s;
} }
for (i = 0; i < ARM_SEC_MAX; i++)
if (maps[i].unw_sec && maps[i].txt_sec)
mod->arch.unwind[i] =
unwind_table_add(maps[i].unw_sec->sh_addr,
maps[i].unw_sec->sh_size,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif #endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
s = find_mod_section(hdr, sechdrs, ".pv_table"); s = find_mod_section(hdr, sechdrs, ".pv_table");
...@@ -519,19 +513,27 @@ void ...@@ -519,19 +513,27 @@ void
module_arch_cleanup(struct module *mod) module_arch_cleanup(struct module *mod)
{ {
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
int i; struct unwind_table *tmp;
struct unwind_table *n;
for (i = 0; i < ARM_SEC_MAX; i++) { list_for_each_entry_safe(tmp, n,
unwind_table_del(mod->arch.unwind[i]); &mod->arch.unwind_list, mod_list) {
mod->arch.unwind[i] = NULL; list_del(&tmp->mod_list);
unwind_table_del(tmp);
} }
mod->arch.init_table = NULL;
#endif #endif
} }
void __weak module_arch_freeing_init(struct module *mod) void __weak module_arch_freeing_init(struct module *mod)
{ {
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
unwind_table_del(mod->arch.unwind[ARM_SEC_INIT]); struct unwind_table *init = mod->arch.init_table;
mod->arch.unwind[ARM_SEC_INIT] = NULL;
if (init) {
mod->arch.init_table = NULL;
list_del(&init->mod_list);
unwind_table_del(init);
}
#endif #endif
} }
...@@ -395,41 +395,20 @@ static void amba_device_release(struct device *dev) ...@@ -395,41 +395,20 @@ static void amba_device_release(struct device *dev)
kfree(d); kfree(d);
} }
static int amba_device_try_add(struct amba_device *dev, struct resource *parent) static int amba_read_periphid(struct amba_device *dev)
{ {
u32 size; struct reset_control *rstc;
u32 size, pid, cid;
void __iomem *tmp; void __iomem *tmp;
int i, ret; int i, ret;
ret = request_resource(parent, &dev->res); ret = dev_pm_domain_attach(&dev->dev, true);
if (ret) if (ret)
goto err_out; goto err_out;
/* Hard-coded primecell ID instead of plug-n-play */
if (dev->periphid != 0)
goto skip_probe;
/*
* Dynamically calculate the size of the resource
* and use this for iomap
*/
size = resource_size(&dev->res);
tmp = ioremap(dev->res.start, size);
if (!tmp) {
ret = -ENOMEM;
goto err_release;
}
ret = dev_pm_domain_attach(&dev->dev, true);
if (ret) {
iounmap(tmp);
goto err_release;
}
ret = amba_get_enable_pclk(dev); ret = amba_get_enable_pclk(dev);
if (ret == 0) { if (ret)
u32 pid, cid; goto err_pm;
struct reset_control *rstc;
/* /*
* Find reset control(s) of the amba bus and de-assert them. * Find reset control(s) of the amba bus and de-assert them.
...@@ -438,36 +417,36 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) ...@@ -438,36 +417,36 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
if (IS_ERR(rstc)) { if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc); ret = PTR_ERR(rstc);
if (ret != -EPROBE_DEFER) if (ret != -EPROBE_DEFER)
dev_err(&dev->dev, "can't get reset: %d\n", dev_err(&dev->dev, "can't get reset: %d\n", ret);
ret); goto err_clk;
goto err_reset;
} }
reset_control_deassert(rstc); reset_control_deassert(rstc);
reset_control_put(rstc); reset_control_put(rstc);
size = resource_size(&dev->res);
tmp = ioremap(dev->res.start, size);
if (!tmp) {
ret = -ENOMEM;
goto err_clk;
}
/* /*
* Read pid and cid based on size of resource * Read pid and cid based on size of resource
* they are located at end of region * they are located at end of region
*/ */
for (pid = 0, i = 0; i < 4; i++) for (pid = 0, i = 0; i < 4; i++)
pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
(i * 8);
for (cid = 0, i = 0; i < 4; i++) for (cid = 0, i = 0; i < 4; i++)
cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
(i * 8);
if (cid == CORESIGHT_CID) { if (cid == CORESIGHT_CID) {
/* set the base to the start of the last 4k block */ /* set the base to the start of the last 4k block */
void __iomem *csbase = tmp + size - 4096; void __iomem *csbase = tmp + size - 4096;
dev->uci.devarch = dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
readl(csbase + UCI_REG_DEVARCH_OFFSET); dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
dev->uci.devtype =
readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
} }
amba_put_disable_pclk(dev);
if (cid == AMBA_CID || cid == CORESIGHT_CID) { if (cid == AMBA_CID || cid == CORESIGHT_CID) {
dev->periphid = pid; dev->periphid = pid;
dev->cid = cid; dev->cid = cid;
...@@ -475,27 +454,45 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) ...@@ -475,27 +454,45 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
if (!dev->periphid) if (!dev->periphid)
ret = -ENODEV; ret = -ENODEV;
}
iounmap(tmp); iounmap(tmp);
err_clk:
amba_put_disable_pclk(dev);
err_pm:
dev_pm_domain_detach(&dev->dev, true); dev_pm_domain_detach(&dev->dev, true);
err_out:
return ret;
}
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
int ret;
ret = request_resource(parent, &dev->res);
if (ret) if (ret)
goto err_out;
/* Hard-coded primecell ID instead of plug-n-play */
if (dev->periphid != 0)
goto skip_probe;
ret = amba_read_periphid(dev);
if (ret) {
if (ret != -EPROBE_DEFER) {
amba_device_put(dev);
goto err_out;
}
goto err_release; goto err_release;
}
skip_probe: skip_probe:
ret = device_add(&dev->dev); ret = device_add(&dev->dev);
err_release: err_release:
if (ret) if (ret)
release_resource(&dev->res); release_resource(&dev->res);
err_out: err_out:
return ret; return ret;
err_reset:
amba_put_disable_pclk(dev);
iounmap(tmp);
dev_pm_domain_detach(&dev->dev, true);
goto err_release;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment