Commit 9b81c021 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: make PACA_IRQ_HARD_DIS track MSR[EE] closely

When the masked interrupt handler clears MSR[EE] for an interrupt in
the PACA_IRQ_MUST_HARD_MASK set, it does not set PACA_IRQ_HARD_DIS.
This lets the two fall out of sync.

With that fixed, the only places they can be out of sync are low-level
irq manipulation (and interrupt entry before the state is reconciled).
This makes the code less surprising.

It also allows the IRQ replay code to rely on the PACA_IRQ_HARD_DIS
value and avoid another mtmsrd in this case (e.g., for an external
interrupt that has been masked). The bigger benefit may simply be
that these two bits of state no longer hold any element of surprise.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 29e8131c
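
For readers who don't have the 64-bit lazy-masking scheme paged in: the
kernel "soft" disables interrupts via a mask in the PACA and only clears
MSR[EE] ("hard" disable) when it must, recording that in the
PACA_IRQ_HARD_DIS bit of irq_happened. Below is a minimal, self-contained
C sketch of the invariant this commit establishes; apart from the MSR_EE
and PACA_IRQ_HARD_DIS names, everything is an illustrative stand-in, not
kernel API.

#include <stdbool.h>

#define MSR_EE			0x8000UL
#define PACA_IRQ_HARD_DIS	0x01

struct cpu_state {
	unsigned long msr;		/* stands in for the real MSR */
	unsigned char irq_happened;	/* stands in for paca->irq_happened */
};

/* Any path that clears MSR[EE] now also sets PACA_IRQ_HARD_DIS. */
static void hard_disable(struct cpu_state *s)
{
	s->msr &= ~MSR_EE;
	s->irq_happened |= PACA_IRQ_HARD_DIS;
}

/*
 * The invariant the replay code can now rely on, outside of low-level
 * irq manipulation and interrupt entry before reconcile:
 * MSR[EE] clear implies PACA_IRQ_HARD_DIS set.
 */
static bool hard_dis_tracks_ee(const struct cpu_state *s)
{
	return (s->msr & MSR_EE) || (s->irq_happened & PACA_IRQ_HARD_DIS);
}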
@@ -253,14 +253,16 @@ static inline bool lazy_irq_pending(void)

 /*
  * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts when soft-disabled after having
- * cleared the source of the interrupt
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
  */
 static inline void may_hard_irq_enable(void)
 {
-	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
+	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 		__hard_irq_enable();
+	}
 }

 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
...
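
The hw_irq.h change is easiest to see side by side. The following
self-contained model mirrors only the two function bodies from the diff
above; the bit values and every other name are illustrative. It shows how
the old ordering strands the state out of sync when a MUST_HARD_MASK
interrupt is pending:

#include <stdio.h>

#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_MUST_HARD_MASK	0x06	/* illustrative mask of sources */

static unsigned char irq_happened;
static int msr_ee;			/* models MSR[EE] */

static void hard_irq_enable(void) { msr_ee = 1; }

/* Old: clears HARD_DIS even when MSR[EE] is left disabled. */
static void may_hard_irq_enable_old(void)
{
	irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(irq_happened & PACA_IRQ_MUST_HARD_MASK))
		hard_irq_enable();
}

/* New: clears HARD_DIS only on the path that re-enables MSR[EE]. */
static void may_hard_irq_enable_new(void)
{
	if (!(irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		irq_happened &= ~PACA_IRQ_HARD_DIS;
		hard_irq_enable();
	}
}

int main(void)
{
	/* Hard disabled, with a must-hard-mask source (0x02) pending. */
	irq_happened = PACA_IRQ_HARD_DIS | 0x02;
	msr_ee = 0;
	may_hard_irq_enable_old();
	printf("old: EE=%d HARD_DIS=%d  <- out of sync\n",
	       msr_ee, !!(irq_happened & PACA_IRQ_HARD_DIS));

	irq_happened = PACA_IRQ_HARD_DIS | 0x02;
	msr_ee = 0;
	may_hard_irq_enable_new();
	printf("new: EE=%d HARD_DIS=%d  <- still in sync\n",
	       msr_ee, !!(irq_happened & PACA_IRQ_HARD_DIS));
	return 0;
}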
@@ -992,6 +992,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	or	r4,r4,r3
 	std	r4,_TRAP(r1)

+	/*
+	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+	 * to reconcile the IRQ state. Tracing is already accounted for.
+	 */
+	lbz	r4,PACAIRQHAPPENED(r13)
+	ori	r4,r4,PACA_IRQ_HARD_DIS
+	stb	r4,PACAIRQHAPPENED(r13)
+
 	/*
 	 * Then find the right handler and call it. Interrupts are
 	 * still soft-disabled and we keep them that way.
...
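
In C terms, the three added instructions amount to the following
one-liner, executed on the replay path where MSR[EE] is known to be
clear (a sketch; the helper name is made up):

#define PACA_IRQ_HARD_DIS	0x01

/* lbz/ori/stb on PACAIRQHAPPENED: force the flag to match MSR[EE]=0. */
static inline void reconcile_hard_dis(unsigned char *irq_happened)
{
	*irq_happened |= PACA_IRQ_HARD_DIS;
}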
@@ -949,7 +949,11 @@ kernel_dbg_exc:
 .macro masked_interrupt_book3e paca_irq full_mask

 	lbz	r10,PACAIRQHAPPENED(r13)
+	.if \full_mask == 1
+	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
+	.else
 	ori	r10,r10,\paca_irq
+	.endif
 	stb	r10,PACAIRQHAPPENED(r13)

 	.if \full_mask == 1
...
@@ -1498,7 +1498,10 @@ masked_##_H##interrupt:					\
 	mfspr	r10,SPRN_##_H##SRR1;				\
 	xori	r10,r10,MSR_EE; /* clear MSR_EE */		\
 	mtspr	SPRN_##_H##SRR1,r10;				\
-2:	mtcrf	0x80,r9;					\
+	ori	r11,r11,PACA_IRQ_HARD_DIS;			\
+	stb	r11,PACAIRQHAPPENED(r13);			\
+2:	/* done */						\
+	mtcrf	0x80,r9;					\
 	std	r1,PACAR1(r13);					\
 	ld	r9,PACA_EXGEN+EX_R9(r13);			\
 	ld	r10,PACA_EXGEN+EX_R10(r13);			\
...
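
Both masked-interrupt paths (book3e above, book3s here) now follow the
same shape, rendered below as pseudo-C. The function and parameter names
are invented for illustration; the must_hard_mask flag abstracts what
the assembly encodes via the macro arguments and the
PACA_IRQ_MUST_HARD_MASK set:

#include <stdint.h>

#define MSR_EE			0x8000UL
#define PACA_IRQ_HARD_DIS	0x01

/*
 * Sketch of the masked fast path: record the pending source; if we are
 * about to return with MSR[EE] cleared, record that too so the flag
 * stays in sync.
 */
static void masked_interrupt_sketch(unsigned char *irq_happened,
				    uint64_t *srr1, unsigned char paca_irq,
				    int must_hard_mask)
{
	*irq_happened |= paca_irq;
	if (must_hard_mask) {
		*srr1 &= ~MSR_EE;
		*irq_happened |= PACA_IRQ_HARD_DIS;
	}
}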
@@ -36,7 +36,7 @@ _GLOBAL(\name)
 	 */
 	lbz	r3,PACAIRQHAPPENED(r13)
 	cmpwi	cr0,r3,0
-	bnelr
+	bne	2f

 	/* Now we are going to mark ourselves as soft and hard enabled in
 	 * order to be able to take interrupts while asleep. We inform lockdep
...
@@ -72,6 +72,11 @@ _GLOBAL(\name)
 	wrteei	1
 	\loop

+2:
+	lbz	r10,PACAIRQHAPPENED(r13)
+	ori	r10,r10,PACA_IRQ_HARD_DIS
+	stb	r10,PACAIRQHAPPENED(r13)
+	blr
 .endm

 .macro BOOK3E_IDLE_LOOP
...
@@ -145,8 +145,20 @@ notrace unsigned int __check_irq_replay(void)
 	trace_hardirqs_on();
 	trace_hardirqs_off();

+	/*
+	 * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
+	 * not be set, which means interrupts have only just been hard
+	 * disabled as part of the local_irq_restore or interrupt return
+	 * code. In that case, skip the decrementer check because it's
+	 * expensive to read the TB.
+	 *
+	 * HARD_DIS then gets cleared here, but it's reconciled later.
+	 * Either local_irq_disable will replay the interrupt and that
+	 * will reconcile state like other hard interrupts. Or interrupt
+	 * return will replay the interrupt and in that case it sets
+	 * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
+	 */
 	if (happened & PACA_IRQ_HARD_DIS) {
-		/* Clear bit 0 which we wouldn't clear otherwise */
 		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

 		/*
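...

The shape of the resulting logic, stripped to the part this commit
changes (a sketch: decrementer_check_overflow() is declared as an
external stand-in for the real, timebase-reading check, and 0x900 is
the decrementer vector; everything else the real function does is
omitted):

#define PACA_IRQ_HARD_DIS	0x01

extern int decrementer_check_overflow(void);	/* costly: reads the TB */

static unsigned int check_irq_replay_sketch(unsigned char *irq_happened)
{
	unsigned char happened = *irq_happened;

	/*
	 * Only a path that has been hard disabled for a while can have
	 * missed a decrementer rollover, so the expensive check is now
	 * confined to the PACA_IRQ_HARD_DIS case.
	 */
	if (happened & PACA_IRQ_HARD_DIS) {
		*irq_happened &= ~PACA_IRQ_HARD_DIS;
		if (decrementer_check_overflow())
			return 0x900;
	}
	return 0;	/* nothing to replay in this sketch */
}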
@@ -248,24 +260,26 @@ notrace void arch_local_irq_restore(unsigned long mask)
 	 * cannot have preempted.
 	 */
 	irq_happened = get_irq_happened();
-	if (!irq_happened)
+	if (!irq_happened) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+		WARN_ON(!(mfmsr() & MSR_EE));
+#endif
 		return;
+	}

 	/*
 	 * We need to hard disable to get a trusted value from
 	 * __check_irq_replay(). We also need to soft-disable
 	 * again to avoid warnings in there due to the use of
 	 * per-cpu variables.
-	 *
-	 * We know that if the value in irq_happened is exactly 0x01
-	 * then we are already hard disabled (there are other less
-	 * common cases that we'll ignore for now), so we skip the
-	 * (expensive) mtmsrd.
 	 */
-	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
+	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+		WARN_ON(!(mfmsr() & MSR_EE));
+#endif
 		__hard_irq_disable();
 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-	else {
+	} else {
 		/*
 		 * We should already be hard disabled here. We had bugs
 		 * where that wasn't the case so let's double-check it and
...
@@ -274,8 +288,8 @@ notrace void arch_local_irq_restore(unsigned long mask)
 		 */
 		if (WARN_ON(mfmsr() & MSR_EE))
 			__hard_irq_disable();
-	}
 #endif
+	}

 	irq_soft_mask_set(IRQS_ALL_DISABLED);
 	trace_hardirqs_off();
...
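
Putting the irq.c changes together, the restore path now reduces to
roughly the following (a sketch: debug warnings, tracing, soft-mask
bookkeeping and the replay dispatch are omitted, and both helpers are
stand-ins for the real ones):

#define PACA_IRQ_HARD_DIS	0x01

extern void hard_irq_disable_stub(void);	/* models __hard_irq_disable() */
extern unsigned int check_irq_replay_sketch(unsigned char *irq_happened);

static void irq_restore_sketch(unsigned char *irq_happened)
{
	if (!*irq_happened)
		return;		/* nothing pended while soft-disabled */

	/*
	 * The old code skipped the mtmsrd only when irq_happened was
	 * exactly PACA_IRQ_HARD_DIS; now that the flag tracks MSR[EE],
	 * testing the single bit covers every already-disabled case.
	 */
	if (!(*irq_happened & PACA_IRQ_HARD_DIS))
		hard_irq_disable_stub();

	check_irq_replay_sketch(irq_happened);	/* then replay if needed */
}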