Commit ecf93431 authored by Linus Torvalds

Merge tag 'powerpc-5.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix crashes coming out of nap on 32-bit Book3s (e.g. PowerBooks).

 - Fix critical and debug interrupts on BookE, seen as crashes when
   using ptrace.

 - Fix an oops when running an SMP kernel on a UP system.

 - Update pseries LPAR security flavor after partition migration.

 - Fix an oops when using kprobes on BookE.

 - Fix oops on 32-bit pmac by not calling do_IRQ() from
   timer_interrupt().

 - Fix softlockups on CPU hotplug into a CPU-less node with xive (P9).

Thanks to Cédric Le Goater, Christophe Leroy, Finn Thain, Geetika
Moolchandani, Laurent Dufour, Laurent Vivier, Nicholas Piggin, Pu Lehui,
Radu Rendec, Srikar Dronamraju, and Stan Johnson.

* tag 'powerpc-5.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/xive: Do not skip CPU-less nodes when creating the IPIs
  powerpc/interrupt: Do not call single_step_exception() from other exceptions
  powerpc/interrupt: Fix OOPS by not calling do_IRQ() from timer_interrupt()
  powerpc/kprobes: Fix kprobe Oops happens in booke
  powerpc/pseries: Fix update of LPAR security flavor after LPM
  powerpc/smp: Fix OOPS in topology_init()
  powerpc/32: Fix critical and debug interrupts on BOOKE
  powerpc/32s: Fix napping restore in data storage interrupt (DSI)
parents c4f14eac cbc06f05
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
 
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 void replay_system_reset(void);
...
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
...
@@ -70,6 +70,22 @@ struct pt_regs
 		unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
 	};
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+	struct { /* Must be a multiple of 16 bytes */
+		unsigned long mas0;
+		unsigned long mas1;
+		unsigned long mas2;
+		unsigned long mas3;
+		unsigned long mas6;
+		unsigned long mas7;
+		unsigned long srr0;
+		unsigned long srr1;
+		unsigned long csrr0;
+		unsigned long csrr1;
+		unsigned long dsrr0;
+		unsigned long dsrr1;
+	};
+#endif
 };
 #endif
...
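The "multiple of 16 bytes" comment on the new block is load-bearing: pt_regs lives on the kernel stack, and the interrupt stack frame assumes 16-byte alignment. As a quick sanity check of the hunk above (illustrative only, not part of the patch; the struct name and _Static_assert below are mine), twelve 32-bit registers come to 48 bytes, which preserves the invariant:

/* Illustrative mirror of the BookE save block added to pt_regs above. */
struct booke_exc_save {
	unsigned long mas0, mas1, mas2, mas3, mas6, mas7;
	unsigned long srr0, srr1, csrr0, csrr1, dsrr0, dsrr1;
};

/* On 32-bit BookE, sizeof(unsigned long) == 4, so 12 * 4 == 48 bytes. */
_Static_assert(sizeof(struct booke_exc_save) % 16 == 0,
	       "exception save area must preserve 16-byte stack alignment");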
@@ -309,24 +309,21 @@ int main(void)
 	STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
 
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+	STACK_PT_REGS_OFFSET(MAS0, mas0);
 	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+	STACK_PT_REGS_OFFSET(MMUCR, mas0);
+	STACK_PT_REGS_OFFSET(MAS1, mas1);
+	STACK_PT_REGS_OFFSET(MAS2, mas2);
+	STACK_PT_REGS_OFFSET(MAS3, mas3);
+	STACK_PT_REGS_OFFSET(MAS6, mas6);
+	STACK_PT_REGS_OFFSET(MAS7, mas7);
+	STACK_PT_REGS_OFFSET(_SRR0, srr0);
+	STACK_PT_REGS_OFFSET(_SRR1, srr1);
+	STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+	STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+	STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+	STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 
 	/* About the CPU features table */
...
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
 	EXCEPTION_PROLOG_1
 	EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
 	prepare_transfer_to_handler
-	lwz	r5, _DSISR(r11)
+	lwz	r5, _DSISR(r1)
 	andis.	r0, r5, DSISR_DABRMATCH@h
 	bne-	1f
 	bl	do_page_fault
...
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE		dbgirq_ctx
 
-#define EXC_LVL_FRAME_OVERHEAD	(THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)	\
 	mfspr	r8,SPRN_PIR;			\
 	slwi	r8,r8,2;			\
 	addis	r8,r8,level##_STACK_BASE@ha;	\
 	lwz	r8,level##_STACK_BASE@l(r8);	\
-	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
+	addi	r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)	\
 	lis	r8,level##_STACK_BASE@ha;	\
 	lwz	r8,level##_STACK_BASE@l(r8);	\
-	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
+	addi	r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
 
 /*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	mtmsr	r11;							\
 	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of */\
 	lwz	r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-	addi	r11,r11,EXC_LVL_FRAME_OVERHEAD;	/* allocate stack frame */\
+	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE;	/* allocate stack frame */\
 	beq	1f;							\
 	/* COMING FROM USER MODE */					\
 	stw	r9,_CCR(r11);		/* save CR			*/\
@@ -516,24 +514,5 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	bl	kernel_fp_unavailable_exception;			\
 	b	interrupt_return
 
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-	unsigned long mas0;
-	unsigned long mas1;
-	unsigned long mas2;
-	unsigned long mas3;
-	unsigned long mas6;
-	unsigned long mas7;
-	unsigned long srr0;
-	unsigned long srr1;
-	unsigned long csrr0;
-	unsigned long csrr1;
-	unsigned long dsrr0;
-	unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE	ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
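Why EXC_LVL_FRAME_OVERHEAD could be deleted outright: the save area that EXC_LVL_SIZE accounted for now lives inside pt_regs (see the ptrace.h hunk above), so INT_FRAME_SIZE already covers it. A comment-style recap of the arithmetic (my gloss, not from the patch):

/*
 * Old layout: a separate exception_regs frame was carved out below the
 * pt_regs frame on the critical/debug exception stack:
 *
 *	top_offset = THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE
 *
 * New layout: the MAS/SRR/CSRR/DSRR save slots are part of pt_regs, so
 * INT_FRAME_SIZE already includes them and the extra term disappears:
 *
 *	top_offset = THREAD_SIZE - INT_FRAME_SIZE
 */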
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
 	trace_irq_exit(regs);
 }
 
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
 	set_irq_regs(old_regs);
 }
 
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+	__do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
 	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
...
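The split matters because DEFINE_INTERRUPT_HANDLER_ASYNC wraps the function body in interrupt entry/exit state handling that must run exactly once per trap. timer_interrupt() is itself such a handler, so when it replays a lost interrupt on 32-bit pmac it must call the bare __do_IRQ() instead of re-entering do_IRQ(). A stripped-down sketch of the wrapper shape (illustrative only; the real macro in asm/interrupt.h also does irq-state and soft-mask bookkeeping):

/* Illustrative sketch, not the kernel's actual macro. */
#define SKETCH_HANDLER_ASYNC(func)					\
	static void ____##func(struct pt_regs *regs);			\
	void func(struct pt_regs *regs)					\
	{								\
		/* one-time entry bookkeeping runs here */		\
		____##func(regs);					\
		/* one-time exit bookkeeping runs here */		\
	}								\
	static void ____##func(struct pt_regs *regs)

/* Calling func() from inside another handler would run the entry/exit
 * glue twice; calling the bare body (here: __do_IRQ()) avoids that. */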
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
 	if (user_mode(regs))
 		return 0;
 
-	if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+	if (!IS_ENABLED(CONFIG_BOOKE) &&
+	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
 		return 0;
 
 	/*
...
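On BookE the corresponding MSR bits select an address space rather than switching translation on or off, so the "MMU must be on" sanity check was rejecting legitimate kprobe traps there. Since IS_ENABLED() folds to a compile-time constant, the whole test vanishes on BookE builds; roughly what the compiler sees (illustrative only):

#ifdef CONFIG_BOOKE
	/* IS_ENABLED(CONFIG_BOOKE) == 1: the check compiles away. */
#else
	if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
		return 0;
#endif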
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
 			 * CPU. For instance, the boot cpu might never be valid
 			 * for hotplugging.
 			 */
-			if (smp_ops->cpu_offline_self)
+			if (smp_ops && smp_ops->cpu_offline_self)
 				c->hotpluggable = 1;
 #endif
 
...
@@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
-		do_IRQ(regs);
+		__do_IRQ(regs);
 #endif
 
 	old_regs = set_irq_regs(regs);
...
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
 	_exception(SIGTRAP, regs, TRAP_UNK, 0);
 }
 
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
 	clear_single_step(regs);
 	clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }
 
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+	__single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
 	if (single_stepping(regs))
-		single_step_exception(regs);
+		__single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)
...
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
 	 * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
 	 * H_CPU_BEHAV_FAVOUR_SECURITY is.
 	 */
-	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
 		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-	else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+		pseries_security_flavor = 0;
+	} else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
 		pseries_security_flavor = 1;
 	else
 		pseries_security_flavor = 2;
...
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
 	unsigned int irq;
 	char name[16];
+	atomic_t started;
 } *xive_ipis;
 
 /*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
 	.alloc	= xive_ipi_irq_domain_alloc,
 };
 
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
 	struct fwnode_handle *fwnode;
 	struct irq_domain *ipi_domain;
@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
 		struct xive_ipi_desc *xid = &xive_ipis[node];
 		struct xive_ipi_alloc_info info = { node };
 
-		/* Skip nodes without CPUs */
-		if (cpumask_empty(cpumask_of_node(node)))
-			continue;
-
 		/*
 		 * Map one IPI interrupt per node for all cpus of that node.
 		 * Since the HW interrupt number doesn't have any meaning,
@@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
 		xid->irq = ret;
 
 		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-		ret = request_irq(xid->irq, xive_muxed_ipi_action,
-				  IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-		WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
 	}
 
 	return ret;
@@ -1178,6 +1170,22 @@ static int __init xive_request_ipi(void)
 	return ret;
 }
 
+static int __init xive_request_ipi(unsigned int cpu)
+{
+	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+	int ret;
+
+	if (atomic_inc_return(&xid->started) > 1)
+		return 0;
+
+	ret = request_irq(xid->irq, xive_muxed_ipi_action,
+			  IRQF_PERCPU | IRQF_NO_THREAD,
+			  xid->name, NULL);
+
+	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+	return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
 	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
 	if (xc->hw_ipi != XIVE_BAD_IRQ)
 		return 0;
 
+	/* Register the IPI */
+	xive_request_ipi(cpu);
+
 	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
 	if (xive_ops->get_ipi(cpu, xc))
 		return -EIO;
@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
 	if (xc->hw_ipi == XIVE_BAD_IRQ)
 		return;
 
+	/* TODO: clear IPI mapping */
+
 	/* Mask the IPI */
 	xive_do_source_set_mask(&xc->ipi_data, true);
@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
 	smp_ops->cause_ipi = xive_cause_ipi;
 
 	/* Register the IPI */
-	xive_request_ipi();
+	xive_init_ipis();
 
 	/* Allocate and setup IPI for the boot CPU */
 	xive_setup_cpu_ipi(smp_processor_id());
...
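The softlockup fix works by decoupling IPI mapping (now done for every node at boot, including CPU-less ones) from IRQ registration (done lazily by the first CPU of a node to come online, via the new per-node started counter). The atomic_inc_return() guard is a classic "first caller wins" idiom; a minimal userspace analogue (illustrative only, using C11 atomics rather than the kernel's atomic_t API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int started;	/* one per node in the real code */

/* Only the first caller performs the registration; later callers
 * (other CPUs of the same node) see a prior value > 0 and skip it. */
static int register_once(void)
{
	if (atomic_fetch_add(&started, 1) > 0)
		return 0;
	printf("registering IPI\n");	/* stands in for request_irq() */
	return 1;
}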