Commit 116ef0fc authored by Russell King

Merge branches 'fixes' and 'misc' into for-next

Victor is known as a "digital talking book player" manufactured by
VisuAide, Inc. for use by blind people.
For more information related to Victor, see:
http://www.humanware.com/en-usa/products
Of course, Victor uses Linux as its main operating system.
The Victor implementation for Linux is maintained by Nicolas Pitre:
-nico@visuaide.com
+nico@fluxnic.net
For any comments, please feel free to contact me through the above
address.
......@@ -54,7 +54,7 @@ VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
located here through iotable_init().
VMALLOC_START is based upon the value
of the high_memory variable, and VMALLOC_END
-is equal to 0xff000000.
+is equal to 0xff800000.
PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
This maps the platform's RAM, and typically
......
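To make the VMALLOC_END move concrete, a hedged worked example (the high_memory value is hypothetical and board-specific; VMALLOC_OFFSET is the 8MB hole defined in pgtable.h below):

    /* hypothetical board with high_memory = 0xEF800000 */
    VMALLOC_START = (0xEF800000 + 0x00800000) & ~0x007FFFFF = 0xF0000000
    /* old window: 0xF0000000..0xFF000000 = 240MB
     * new window: 0xF0000000..0xFF800000 = 248MB */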
......@@ -19,6 +19,11 @@ interrupts.
- reg : Specify the base address and the size of the TWD timer
register window.
+Optional
+- always-on : a boolean property. If present, the timer is powered through
+an always-on power domain, therefore it never loses context.
Example:
twd-timer@2c000600 {
......
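A hedged sketch of a node using the new property, extending the binding's own example (base address, register size and interrupt specifier are placeholders):

    twd-timer@2c000600 {
        compatible = "arm,cortex-a9-twd-timer";
        reg = <0x2c000600 0x20>;
        interrupts = <1 13 0xf01>;
        always-on;
    };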
......@@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY
config ARCH_RPC
bool "RiscPC"
+depends on MMU
select ARCH_ACORN
select ARCH_MAY_HAVE_PC_FDC
select ARCH_SPARSEMEM_ENABLE
......@@ -1410,7 +1411,6 @@ config HAVE_ARM_ARCH_TIMER
config HAVE_ARM_TWD
bool
-depends on SMP
select CLKSRC_OF if OF
help
This option enables support for the ARM timer and watchdog unit
......@@ -1470,6 +1470,8 @@ choice
config VMSPLIT_3G
bool "3G/1G user/kernel split"
+config VMSPLIT_3G_OPT
+bool "3G/1G user/kernel split (for full 1G low memory)"
config VMSPLIT_2G
bool "2G/2G user/kernel split"
config VMSPLIT_1G
......@@ -1481,6 +1483,7 @@ config PAGE_OFFSET
default PHYS_OFFSET if !MMU
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
+default 0xB0000000 if VMSPLIT_3G_OPT
default 0xC0000000
config NR_CPUS
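A rough worked example of the gain (numbers are hedged, assuming the default 240MB vmalloc reserve so lowmem tops out near 0xF0000000): with VMSPLIT_3G the direct map spans 0xC0000000..0xF0000000, about 768MB, so a 1GB board needs HIGHMEM; with VMSPLIT_3G_OPT it spans 0xB0000000..0xF0000000, a full 1GB, so the same board runs entirely from lowmem.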
......@@ -1695,8 +1698,9 @@ config HIGHMEM
If unsure, say n.
config HIGHPTE
bool "Allocate 2nd-level pagetables from highmem"
bool "Allocate 2nd-level pagetables from highmem" if EXPERT
depends on HIGHMEM
default y
help
The VM uses one page of physical memory for each page table.
For systems with a lot of processes, this can use a lot of
......
......@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
case 1:
asm volatile("@ __xchg1\n"
"1: ldrexb %0, [%3]\n"
......@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
+case 2:
+asm volatile("@ __xchg2\n"
+"1: ldrexh %0, [%3]\n"
+" strexh %1, %2, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+: "=&r" (ret), "=&r" (tmp)
+: "r" (x), "r" (ptr)
+: "memory", "cc");
+break;
+#endif
case 4:
asm volatile("@ __xchg4\n"
"1: ldrex %0, [%3]\n"
......
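For context, a hedged sketch of how the new half-word case is reached through the generic xchg() macro (the token variable and helper are invented, not from this commit):

    #include <linux/atomic.h>   /* pulls in the arch xchg() above */

    static u16 lock_token;

    static u16 claim_token(void)
    {
        /* On >= V6K this expands to the ldrexh/strexh loop added
         * above (case 2); returns the previous value atomically. */
        return xchg(&lock_token, 1);
    }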
......@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
+#endif
#else
/*
......@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
: "memory", "cc"); \
})
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
#endif
/*
......
......@@ -47,7 +47,7 @@ struct machine_desc {
unsigned l2c_aux_val; /* L2 cache aux value */
unsigned l2c_aux_mask; /* L2 cache aux mask */
void (*l2c_write_sec)(unsigned long, unsigned);
-struct smp_operations *smp; /* SMP operations */
+const struct smp_operations *smp; /* SMP operations */
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **);
void (*dt_fixup)(void);
......
......@@ -76,10 +76,12 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
*/
#define IOREMAP_MAX_ORDER 24
+#endif
#else /* CONFIG_MMU */
......
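The new guard makes the 16MB order available only where ioremap() can actually use section or supersection mappings (!SMP, non-LPAE builds). A hedged illustration of what it permits; the device address is a placeholder:

    /* hypothetical 16MB device window, 16MB-aligned: on a !SMP,
     * non-LPAE build this may be mapped with a supersection */
    void __iomem *win = ioremap(0x40000000, SZ_16M);
    if (!win)
        return -ENOMEM;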
......@@ -43,7 +43,7 @@
*/
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END 0xff000000UL
+#define VMALLOC_END 0xff800000UL
#define LIBRARY_TEXT_START 0x0c000000
......
......@@ -112,7 +112,7 @@ struct smp_operations {
struct of_cpu_method {
const char *method;
-struct smp_operations *ops;
+const struct smp_operations *ops;
};
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
......@@ -122,6 +122,6 @@ struct of_cpu_method {
/*
* set platform specific SMP operations
*/
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);
#endif /* ifndef __ASM_ARM_SMP_H */
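With the hook tables constified, platform code can keep its operations in rodata; a minimal hedged sketch (the myboard names are hypothetical):

    static const struct smp_operations myboard_smp_ops __initconst = {
        .smp_prepare_cpus   = myboard_smp_prepare_cpus,
        .smp_boot_secondary = myboard_boot_secondary,
    };
    CPU_METHOD_OF_DECLARE(myboard_smp, "vendor,myboard-smp", &myboard_smp_ops);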
......@@ -21,13 +21,6 @@
*/
#define __NR_syscalls (392)
-/*
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
......
......@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
if (of_property_read_u32(cpu, "reg", &hwid)) {
pr_debug(" * %s missing reg property\n",
cpu->full_name);
+of_node_put(cpu);
return;
}
......@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
* 8 MSBs must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
*/
-if (hwid & ~MPIDR_HWID_BITMASK)
+if (hwid & ~MPIDR_HWID_BITMASK) {
+of_node_put(cpu);
return;
+}
/*
* Duplicate MPIDRs are a recipe for disaster.
......@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
* to avoid matching valid MPIDR[23:0] values.
*/
for (j = 0; j < cpuidx; j++)
-if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
-"properties in the DT\n"))
+if (WARN(tmp_map[j] == hwid,
+"Duplicate /cpu reg properties in the DT\n")) {
+of_node_put(cpu);
return;
+}
/*
* Build a stashed array of MPIDR values. Numbering scheme
......@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
"max cores %u, capping them\n",
cpuidx, nr_cpu_ids)) {
cpuidx = nr_cpu_ids;
+of_node_put(cpu);
break;
}
......
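The added of_node_put() calls follow the standard iterator contract: for_each_child_of_node() takes a reference on each child it yields, so every early exit must drop it. A generic hedged sketch of the pattern:

    struct device_node *child;

    for_each_child_of_node(parent, child) {
        if (something_wrong(child)) {
            of_node_put(child);  /* drop the iterator's reference */
            return;              /* or break */
        }
    }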
......@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
-!defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
......@@ -859,20 +858,7 @@ __kuser_helper_start:
__kuser_cmpxchg64: @ 0xffff0f60
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-/*
- * Poor you. No fast solution possible...
- * The kernel itself must perform the operation.
- * A special ghost syscall is used for that (see traps.c).
- */
-stmfd sp!, {r7, lr}
-ldr r7, 1f @ it's 20 bits
-swi __ARM_NR_cmpxchg64
-ldmfd sp!, {r7, pc}
-1: .word __ARM_NR_cmpxchg64
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)
stmfd sp!, {r4, r5, r6, r7}
ldrd r4, r5, [r0] @ load old val
......@@ -948,20 +934,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
__kuser_cmpxchg: @ 0xffff0fc0
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-/*
- * Poor you. No fast solution possible...
- * The kernel itself must perform the operation.
- * A special ghost syscall is used for that (see traps.c).
- */
-stmfd sp!, {r7, lr}
-ldr r7, 1f @ it's 20 bits
-swi __ARM_NR_cmpxchg
-ldmfd sp!, {r7, pc}
-1: .word __ARM_NR_cmpxchg
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6
#ifdef CONFIG_MMU
......
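What survives is the kuser helper at a fixed vector-page address; per Documentation/arm/kernel_user_helpers.txt, userspace calls it like this (the increment loop is illustrative, not from this commit):

    typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

    static volatile int v;

    /* atomic increment built on the helper; returns 0 from the
     * helper only when *ptr was changed from oldval to newval */
    static void atomic_inc_helper(void)
    {
        int old;
        do {
            old = v;
        } while (__kernel_cmpxchg(old, old + 1, &v) != 0);
    }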
......@@ -35,7 +35,6 @@
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
#include <asm/traps.h>
/* Breakpoint currently in use for each BRP. */
......
......@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
-struct pt_regs *thread_regs;
+struct thread_info *ti;
int regno;
/* Just making sure... */
......@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
-thread_regs = task_pt_regs(task);
-gdb_regs[_R0] = thread_regs->ARM_r0;
-gdb_regs[_R1] = thread_regs->ARM_r1;
-gdb_regs[_R2] = thread_regs->ARM_r2;
-gdb_regs[_R3] = thread_regs->ARM_r3;
-gdb_regs[_R4] = thread_regs->ARM_r4;
-gdb_regs[_R5] = thread_regs->ARM_r5;
-gdb_regs[_R6] = thread_regs->ARM_r6;
-gdb_regs[_R7] = thread_regs->ARM_r7;
-gdb_regs[_R8] = thread_regs->ARM_r8;
-gdb_regs[_R9] = thread_regs->ARM_r9;
-gdb_regs[_R10] = thread_regs->ARM_r10;
-gdb_regs[_FP] = thread_regs->ARM_fp;
-gdb_regs[_IP] = thread_regs->ARM_ip;
-gdb_regs[_SPT] = thread_regs->ARM_sp;
-gdb_regs[_LR] = thread_regs->ARM_lr;
-gdb_regs[_PC] = thread_regs->ARM_pc;
-gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
+ti = task_thread_info(task);
+gdb_regs[_R4] = ti->cpu_context.r4;
+gdb_regs[_R5] = ti->cpu_context.r5;
+gdb_regs[_R6] = ti->cpu_context.r6;
+gdb_regs[_R7] = ti->cpu_context.r7;
+gdb_regs[_R8] = ti->cpu_context.r8;
+gdb_regs[_R9] = ti->cpu_context.r9;
+gdb_regs[_R10] = ti->cpu_context.sl;
+gdb_regs[_FP] = ti->cpu_context.fp;
+gdb_regs[_SPT] = ti->cpu_context.sp;
+gdb_regs[_PC] = ti->cpu_context.pc;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
......
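For reference, the context __switch_to actually saves on 32-bit ARM is roughly the following (a sketch of struct cpu_context_save from asm/thread_info.h; field layout hedged from memory):

    struct cpu_context_save {
        __u32 r4, r5, r6, r7, r8, r9;
        __u32 sl, fp, sp, pc;       /* sl is r10 */
        __u32 extra[2];             /* e.g. XScale 'acc' register */
    };

Everything else (r0-r3, ip, lr, cpsr) is simply not preserved for a sleeping task, which is why those gdb registers stay zeroed above.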
......@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops;
-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
{
if (ops)
smp_ops = *ops;
......@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)
local_irq_enable();
local_fiq_enable();
+local_abt_enable();
/*
* OK, it's off to the idle thread for us
......@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
+/*
+ * Generate the backtrace directly if we are running in a calling
+ * context that is not preemptible by the backtrace IPI. Note
+ * that nmi_cpu_backtrace() automatically removes the current cpu
+ * from mask.
+ */
+if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+nmi_cpu_backtrace(NULL);
smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
......
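For context, a hedged sketch of how this path is typically reached; the dump_everywhere helper is made up:

    #include <linux/nmi.h>

    /* hypothetical diagnostic hook, e.g. wired to a watchdog or sysrq-l */
    static void dump_everywhere(void)
    {
        /* IPIs the other CPUs; the guard added above covers the case
         * where the current CPU is in the mask but cannot take the IPI */
        trigger_all_cpu_backtrace();
    }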
......@@ -23,7 +23,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
/* set up by the platform code */
......@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu *twd_evt;
+static unsigned int twd_features =
+CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
static int twd_ppi;
static int twd_shutdown(struct clock_event_device *clk)
......@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
-clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-CLOCK_EVT_FEAT_C3STOP;
+clk->features = twd_features;
clk->rating = 350;
clk->set_state_shutdown = twd_shutdown;
clk->set_state_periodic = twd_set_periodic;
......@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
goto out_irq;
twd_get_clock(np);
+if (!of_property_read_bool(np, "always-on"))
+twd_features |= CLOCK_EVT_FEAT_C3STOP;
/*
* Immediately configure the timer on the boot CPU, unless we need
......@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
{
int err;
-if (!is_smp() || !setup_max_cpus)
-return;
twd_ppi = irq_of_parse_and_map(np, 0);
if (!twd_ppi) {
err = -EINVAL;
......
......@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
set_tls(regs->ARM_r0);
return 0;
-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-/*
- * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
- * Return zero in r0 if *MEM was changed or non-zero if no exchange
- * happened. Also set the user C flag accordingly.
- * If access permissions have to be fixed up then non-zero is
- * returned and the operation has to be re-attempted.
- *
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
-case NR(cmpxchg):
-for (;;) {
-extern void do_DataAbort(unsigned long addr, unsigned int fsr,
-struct pt_regs *regs);
-unsigned long val;
-unsigned long addr = regs->ARM_r2;
-struct mm_struct *mm = current->mm;
-pgd_t *pgd; pmd_t *pmd; pte_t *pte;
-spinlock_t *ptl;
-regs->ARM_cpsr &= ~PSR_C_BIT;
-down_read(&mm->mmap_sem);
-pgd = pgd_offset(mm, addr);
-if (!pgd_present(*pgd))
-goto bad_access;
-pmd = pmd_offset(pgd, addr);
-if (!pmd_present(*pmd))
-goto bad_access;
-pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
-pte_unmap_unlock(pte, ptl);
-goto bad_access;
-}
-val = *(unsigned long *)addr;
-val -= regs->ARM_r0;
-if (val == 0) {
-*(unsigned long *)addr = regs->ARM_r1;
-regs->ARM_cpsr |= PSR_C_BIT;
-}
-pte_unmap_unlock(pte, ptl);
-up_read(&mm->mmap_sem);
-return val;
-bad_access:
-up_read(&mm->mmap_sem);
-/* simulate a write access fault */
-do_DataAbort(addr, 15 + (1 << 11), regs);
-}
-#endif
default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
......
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
......@@ -20,6 +21,8 @@
*/
ENTRY(__clear_user_std)
WEAK(arm_clear_user)
+UNWIND(.fnstart)
+UNWIND(.save {r1, lr})
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
......@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
+UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
......
......@@ -419,28 +419,24 @@ config CPU_THUMBONLY
config CPU_32v3
bool
select CPU_USE_DOMAINS if MMU
-select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
-select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
-select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
-select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
......@@ -805,14 +801,6 @@ config TLS_REG_EMUL
a few prototypes like that in existence) and therefore access to
that required register must be emulated.
-config NEEDS_SYSCALL_FOR_CMPXCHG
-bool
-select NEED_KUSER_HELPERS
-help
-SMP on a pre-ARMv6 processor? Well OK then.
-Forget about fast user space cmpxchg support.
-It is just not possible.
config NEED_KUSER_HELPERS
bool
......
......@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (!pages)
return -ENXIO;
+if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+return -ENXIO;
+pages += off;
do {
int ret = vm_insert_page(vma, uaddr, *pages++);
if (ret) {
......
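The new check rejects offsets and sizes that run past the buffer, written with subtraction so that off plus the mapped page count cannot overflow; a hedged standalone restatement (the helper is illustrative, not in the kernel):

    static bool iommu_mmap_range_ok(unsigned long off, unsigned long upages,
                                    unsigned long nr_pages)
    {
        return off < nr_pages && upages <= nr_pages - off;
    }
    /* e.g. nr_pages = 4, off = 3, upages = 2: 2 > 4 - 3, so rejected */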
......@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, ifsr, 0);
}
+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+struct pt_regs *regs)
+{
+pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+"first unmask, this is most likely caused by a "
+"firmware/bootloader bug.\n", fsr);
+return 0;
+}
+void __init early_abt_enable(void)
+{
+fsr_info[22].fn = early_abort_handler;
+local_abt_enable();
+fsr_info[22].fn = do_bad;
+}
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
......
......@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);
#endif /* __ARCH_ARM_FAULT_H */
......@@ -38,6 +38,7 @@
#include <asm/mach/pci.h>
#include <asm/fixmap.h>
#include "fault.h"
#include "mm.h"
#include "tcm.h"
......@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
*/
local_flush_tlb_all();
flush_cache_all();
+/* Enable asynchronous aborts */
+early_abt_enable();
}
static void __init kmap_init(void)
......
......@@ -45,7 +45,6 @@
* it does.
*/
-#include <byteswap.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
......@@ -59,6 +58,16 @@
#include <sys/types.h>
#include <unistd.h>
+#define swab16(x) \
+((((x) & 0x00ff) << 8) | \
+(((x) & 0xff00) >> 8))
+#define swab32(x) \
+((((x) & 0x000000ff) << 24) | \
+(((x) & 0x0000ff00) << 8) | \
+(((x) & 0x00ff0000) >> 8) | \
+(((x) & 0xff000000) >> 24))
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HOST_ORDER ELFDATA2LSB
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
......@@ -104,17 +113,17 @@ static void cleanup(void)
static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
{
-return swap ? bswap_32(word) : word;
+return swap ? swab32(word) : word;
}
static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
{
-return swap ? bswap_16(half) : half;
+return swap ? swab16(half) : half;
}
static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
{
-*dst = swap ? bswap_32(val) : val;
+*dst = swap ? swab32(val) : val;
}
int main(int argc, char **argv)
......
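The open-coded macros mirror the bswap_16()/bswap_32() calls they replace while dropping the non-portable <byteswap.h> dependency; a quick hedged host-side check:

    #include <stdio.h>

    #define swab32(x) \
        ((((x) & 0x000000ff) << 24) | \
         (((x) & 0x0000ff00) << 8) | \
         (((x) & 0x00ff0000) >> 8) | \
         (((x) & 0xff000000) >> 24))

    int main(void)
    {
        printf("0x%08x\n", swab32(0x11223344u));  /* prints 0x44332211 */
        return 0;
    }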
......@@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name,
if (IS_ERR(r))
return PTR_ERR(r);
l = clkdev_create(r, alias, "%s", alias_dev_name);
l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
alias_dev_name);
clk_put(r);
return l ? 0 : -ENODEV;
......
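The conditional format string matters because vsnprintf renders a NULL string argument as "(null)": the old code therefore registered the alias against a device literally named "(null)" instead of creating a device-less lookup. A hedged usage sketch (clock and device names are invented):

    /* alias bound to a specific device */
    clk_add_alias("uartclk", "serial8250.0", "apb_pclk", NULL);

    /* device-less alias: must end up with a NULL dev_id, not "(null)" */
    clk_add_alias("sysclk", NULL, "osc24m", NULL);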
......@@ -41,8 +41,6 @@ struct amba_driver {
int (*probe)(struct amba_device *, const struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
-int (*suspend)(struct amba_device *, pm_message_t);
-int (*resume)(struct amba_device *);
const struct amba_id *id_table;
};
......
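Drivers that relied on the removed hooks are expected to supply dev_pm_ops through the embedded struct device_driver instead; a minimal hedged sketch (all foo_* names hypothetical):

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct amba_driver foo_driver = {
        .drv = {
            .name = "foo",
            .pm   = &foo_pm_ops,
        },
        .probe    = foo_probe,
        .remove   = foo_remove,
        .id_table = foo_ids,
    };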
......@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
printk("%.*s", (end - start) + 1, buf);
}
+/*
+ * When raise() is called it will be passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
void nmi_trigger_all_cpu_backtrace(bool include_self,
void (*raise)(cpumask_t *mask))
{
......@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
/* Replace printk to write into the NMI seq */
this_cpu_write(printk_func, nmi_vprintk);
pr_warn("NMI backtrace for cpu %d\n", cpu);
+if (regs)
show_regs(regs);
+else
+dump_stack();
this_cpu_write(printk_func, printk_func_save);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
......