Commit 2eb5866c authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:

 - quieten the spectre-bhb prints

 - mark flattened device tree sections as shareable

 - remove some obsolete CPU domain code and help text

 - fix thumb unaligned access abort emulation

 - fix amba_device_add() refcount underflow

 - fix literal placement

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9208/1: entry: add .ltorg directive to keep literals in range
  ARM: 9207/1: amba: fix refcount underflow if amba_device_add() fails
  ARM: 9214/1: alignment: advance IT state after emulating Thumb instruction
  ARM: 9213/1: Print message about disabled Spectre workarounds only once
  ARM: 9212/1: domain: Modify Kconfig help text
  ARM: 9211/1: domain: drop modify_domain()
  ARM: 9210/1: Mark the FDT_FIXED sections as shareable
  ARM: 9209/1: Spectre-BHB: avoid pr_info() every time a CPU comes out of idle
parents 097da1a4 29589ca0
...@@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val) ...@@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val)
} }
#endif #endif
/*
 * modify_domain(dom, type): read-modify-write one field of the current
 * domain register value.  Reads the register via get_domain(), clears the
 * field selected by domain_mask(dom), ORs in domain_val(dom, type), and
 * writes the result back with set_domain().
 *
 * NOTE(review): this helper is removed by this commit (ARM: 9211/1,
 * "domain: drop modify_domain()") — it had no remaining users.
 */
#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
unsigned int domain = get_domain(); \
domain &= ~domain_mask(dom); \
domain = domain | domain_val(dom, type); \
set_domain(domain); \
} while (0)
#else
/* Domain switching not configured: modify_domain() is a no-op. */
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
/* /*
* Generate the T (user) versions of the LDR/STR and related * Generate the T (user) versions of the LDR/STR and related
* instructions (inline assembly) * instructions (inline assembly)
......
...@@ -27,6 +27,7 @@ enum { ...@@ -27,6 +27,7 @@ enum {
MT_HIGH_VECTORS, MT_HIGH_VECTORS,
MT_MEMORY_RWX, MT_MEMORY_RWX,
MT_MEMORY_RW, MT_MEMORY_RW,
MT_MEMORY_RO,
MT_ROM, MT_ROM,
MT_MEMORY_RWX_NONCACHED, MT_MEMORY_RWX_NONCACHED,
MT_MEMORY_RW_DTCM, MT_MEMORY_RW_DTCM,
......
...@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) ...@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
}) })
/*
 * Advance the Thumb IT (If-Then) execution state held in CPSR after one
 * instruction of an IT block has been executed/emulated.
 *
 * The 8 ITSTATE bits are split across two CPSR fields:
 *	ITSTATE<1:0> live in CPSR<26:25>
 *	ITSTATE<7:2> live in CPSR<15:10>
 *
 * Returns the updated CPSR value.
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
	/* CPSR bits holding ITSTATE<4:0> */
	const unsigned long it40_mask = 0x06001c00;
	unsigned long state;

	/* ITSTATE<2:0> all zero: the IT block has ended; drop all IT bits */
	if (!(cpsr & 0x06000400))
		return cpsr & ~PSR_IT_MASK;

	/* Shift ITSTATE<4:0> left by one position within the split fields */
	state = (cpsr & it40_mask) << 1;
	/* A shift out of CPSR<26> must land back in CPSR<10> (ITSTATE<2>) */
	state |= state >> (27 - 10);
	state &= it40_mask;

	return (cpsr & ~it40_mask) | state;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif #endif
...@@ -302,6 +302,7 @@ local_restart: ...@@ -302,6 +302,7 @@ local_restart:
b ret_fast_syscall b ret_fast_syscall
#endif #endif
ENDPROC(vector_swi) ENDPROC(vector_swi)
.ltorg
/* /*
* This is the really slow path. We're going to be doing * This is the really slow path. We're going to be doing
......
...@@ -631,7 +631,11 @@ config CPU_USE_DOMAINS ...@@ -631,7 +631,11 @@ config CPU_USE_DOMAINS
bool bool
help help
This option enables or disables the use of domain switching This option enables or disables the use of domain switching
via the set_fs() function. using the DACR (domain access control register) to protect memory
domains from each other. In Linux we use three domains: kernel, user
and IO. The domains are used to protect userspace from kernelspace
and to handle IO-space as a special type of memory by assigning
manager or client roles to running code (such as a process).
config CPU_V7M_NUM_IRQ config CPU_V7M_NUM_IRQ
int "Number of external interrupts connected to the NVIC" int "Number of external interrupts connected to the NVIC"
......
...@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) ...@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (type == TYPE_LDST) if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset); do_alignment_finish_ldst(addr, instr, regs, offset);
if (thumb_mode(regs))
regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
return 0; return 0;
bad_or_fault: bad_or_fault:
......
...@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = { ...@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL, .domain = DOMAIN_KERNEL,
}, },
[MT_MEMORY_RO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = { [MT_ROM] = {
.prot_sect = PMD_TYPE_SECT, .prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL, .domain = DOMAIN_KERNEL,
...@@ -489,6 +496,7 @@ static void __init build_mem_type_table(void) ...@@ -489,6 +496,7 @@ static void __init build_mem_type_table(void)
/* Also setup NX memory mapping */ /* Also setup NX memory mapping */
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
} }
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/* /*
...@@ -568,6 +576,7 @@ static void __init build_mem_type_table(void) ...@@ -568,6 +576,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif #endif
/* /*
...@@ -587,6 +596,8 @@ static void __init build_mem_type_table(void) ...@@ -587,6 +596,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
...@@ -647,6 +658,8 @@ static void __init build_mem_type_table(void) ...@@ -647,6 +658,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd; mem_types[MT_ROM].prot_sect |= cp->pmd;
...@@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) ...@@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
map.virtual = FDT_FIXED_BASE; map.virtual = FDT_FIXED_BASE;
map.length = FDT_FIXED_SIZE; map.length = FDT_FIXED_SIZE;
map.type = MT_ROM; map.type = MT_MEMORY_RO;
create_mapping(&map); create_mapping(&map);
} }
......
...@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method) ...@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)
#else #else
static unsigned int spectre_v2_install_workaround(unsigned int method) static unsigned int spectre_v2_install_workaround(unsigned int method)
{ {
pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", pr_info_once("Spectre V2: workarounds disabled by configuration\n");
smp_processor_id());
return SPECTRE_VULNERABLE; return SPECTRE_VULNERABLE;
} }
...@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method) ...@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)
return SPECTRE_VULNERABLE; return SPECTRE_VULNERABLE;
spectre_bhb_method = method; spectre_bhb_method = method;
}
pr_info("CPU%u: Spectre BHB: using %s workaround\n", pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
smp_processor_id(), spectre_bhb_method_name(method)); smp_processor_id(), spectre_bhb_method_name(method));
}
return SPECTRE_MITIGATED; return SPECTRE_MITIGATED;
} }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <asm/probes.h> #include <asm/probes.h>
#include <asm/ptrace.h>
#include <asm/kprobes.h> #include <asm/kprobes.h>
void __init arm_probes_decode_init(void); void __init arm_probes_decode_init(void);
...@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void); ...@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void);
#endif #endif
/*
 * Update ITSTATE after normal execution of an IT block instruction.
 *
 * The 8 IT state bits are split into two parts in CPSR:
 *	ITSTATE<1:0> are in CPSR<26:25>
 *	ITSTATE<7:2> are in CPSR<15:10>
 *
 * Returns the updated CPSR value.
 *
 * NOTE(review): this copy is deleted by this commit (ARM: 9214/1); the
 * function now lives in <asm/ptrace.h> so the alignment fault handler
 * can share it with the probes code.
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
if ((cpsr & 0x06000400) == 0) {
/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
cpsr &= ~PSR_IT_MASK;
} else {
/* We need to shift left ITSTATE<4:0> */
const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
unsigned long it = cpsr & mask;
it <<= 1;
/* The bit shifted out of CPSR<26> re-enters at CPSR<10> (ITSTATE<2>) */
it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
it &= mask;
cpsr &= ~mask;
cpsr |= it;
}
return cpsr;
}
static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
{ {
long cpsr = regs->ARM_cpsr; long cpsr = regs->ARM_cpsr;
......
...@@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) ...@@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
goto skip_probe; goto skip_probe;
ret = amba_read_periphid(dev); ret = amba_read_periphid(dev);
if (ret) { if (ret)
if (ret != -EPROBE_DEFER) {
amba_device_put(dev);
goto err_out;
}
goto err_release; goto err_release;
}
skip_probe: skip_probe:
ret = device_add(&dev->dev); ret = device_add(&dev->dev);
...@@ -546,6 +541,7 @@ static int amba_deferred_retry(void) ...@@ -546,6 +541,7 @@ static int amba_deferred_retry(void)
continue; continue;
list_del_init(&ddev->node); list_del_init(&ddev->node);
amba_device_put(ddev->dev);
kfree(ddev); kfree(ddev);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment