Commit 7b08729c authored by Michael Ellerman's avatar Michael Ellerman

powerpc/64: Save stack pointer when we hard disable interrupts

A CPU that gets stuck with interrupts hard disabled can be difficult to
debug, as on some platforms we have no way to interrupt the CPU to
find out what it's doing.

A stop-gap is to have the CPU save its stack pointer (r1) in its paca
when it hard disables interrupts. That way if we can't interrupt it,
we can at least trace the stack based on where it last disabled
interrupts.

In some cases that will be total junk, but the stack trace code should
handle that. In the simple case of a CPU that disables interrupts and
then gets stuck in a loop, the stack trace should be informative.

We could clear the saved stack pointer when we enable interrupts, but
that loses information which could be useful if we have nothing else
to go on.
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Reviewed-by: default avatarNicholas Piggin <npiggin@gmail.com>
parent 3e378680
...@@ -238,8 +238,12 @@ static inline bool arch_irqs_disabled(void) ...@@ -238,8 +238,12 @@ static inline bool arch_irqs_disabled(void)
__hard_irq_disable(); \ __hard_irq_disable(); \
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \ flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \ if (!arch_irqs_disabled_flags(flags)) { \
asm ("stdx %%r1, 0, %1 ;" \
: "=m" (local_paca->saved_r1) \
: "b" (&local_paca->saved_r1)); \
trace_hardirqs_off(); \ trace_hardirqs_off(); \
} \
} while(0) } while(0)
static inline bool lazy_irq_pending(void) static inline bool lazy_irq_pending(void)
......
...@@ -161,7 +161,7 @@ struct paca_struct { ...@@ -161,7 +161,7 @@ struct paca_struct {
struct task_struct *__current; /* Pointer to current */ struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */ u64 kstack; /* Saved Kernel stack addr */
u64 stab_rr; /* stab/slb round-robin counter */ u64 stab_rr; /* stab/slb round-robin counter */
u64 saved_r1; /* r1 save for RTAS calls or PM */ u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */ u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */ u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */ u8 irq_soft_mask; /* mask for irq soft masking */
......
...@@ -1499,6 +1499,7 @@ masked_##_H##interrupt: \ ...@@ -1499,6 +1499,7 @@ masked_##_H##interrupt: \
xori r10,r10,MSR_EE; /* clear MSR_EE */ \ xori r10,r10,MSR_EE; /* clear MSR_EE */ \
mtspr SPRN_##_H##SRR1,r10; \ mtspr SPRN_##_H##SRR1,r10; \
2: mtcrf 0x80,r9; \ 2: mtcrf 0x80,r9; \
std r1,PACAR1(r13); \
ld r9,PACA_EXGEN+EX_R9(r13); \ ld r9,PACA_EXGEN+EX_R9(r13); \
ld r10,PACA_EXGEN+EX_R10(r13); \ ld r10,PACA_EXGEN+EX_R10(r13); \
ld r11,PACA_EXGEN+EX_R11(r13); \ ld r11,PACA_EXGEN+EX_R11(r13); \
......
...@@ -1172,6 +1172,10 @@ static int cpu_cmd(void) ...@@ -1172,6 +1172,10 @@ static int cpu_cmd(void)
/* try to switch to cpu specified */ /* try to switch to cpu specified */
if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
printf("cpu 0x%lx isn't in xmon\n", cpu); printf("cpu 0x%lx isn't in xmon\n", cpu);
#ifdef CONFIG_PPC64
printf("backtrace of paca[0x%lx].saved_r1 (possibly stale):\n", cpu);
xmon_show_stack(paca_ptrs[cpu]->saved_r1, 0, 0);
#endif
return 0; return 0;
} }
xmon_taken = 0; xmon_taken = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment