Commit 4e26bc4a authored by Madhavan Srinivasan's avatar Madhavan Srinivasan Committed by Michael Ellerman

powerpc/64: Rename soft_enabled to irq_soft_mask

Rename the paca->soft_enabled to paca->irq_soft_mask as it is no
longer used as a flag for interrupt state, but a mask.
Signed-off-by: default avatarMadhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
parent 01417c6c
...@@ -432,7 +432,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) ...@@ -432,7 +432,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mflr r9; /* Get LR, later save to stack */ \ mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \ std r9,_LINK(r1); \
lbz r10,PACASOFTIRQEN(r13); \ lbz r10,PACAIRQSOFTMASK(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \ mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r10,SOFTE(r1); \ std r10,SOFTE(r1); \
std r11,_XER(r1); \ std r11,_XER(r1); \
...@@ -498,7 +498,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) ...@@ -498,7 +498,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE #define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
#define __SOFTEN_TEST(h, vec) \ #define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \ lbz r10,PACAIRQSOFTMASK(r13); \
andi. r10,r10,IRQS_DISABLED; \ andi. r10,r10,IRQS_DISABLED; \
li r10,SOFTEN_VALUE_##vec; \ li r10,SOFTEN_VALUE_##vec; \
bne masked_##h##interrupt bne masked_##h##interrupt
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#define PACA_IRQ_HMI 0x20 #define PACA_IRQ_HMI 0x20
/* /*
* flags for paca->soft_enabled * flags for paca->irq_soft_mask
*/ */
#define IRQS_ENABLED 0 #define IRQS_ENABLED 0
#define IRQS_DISABLED 1 #define IRQS_DISABLED 1
...@@ -49,14 +49,14 @@ extern void unknown_exception(struct pt_regs *regs); ...@@ -49,14 +49,14 @@ extern void unknown_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#include <asm/paca.h> #include <asm/paca.h>
static inline notrace unsigned long soft_enabled_return(void) static inline notrace unsigned long irq_soft_mask_return(void)
{ {
unsigned long flags; unsigned long flags;
asm volatile( asm volatile(
"lbz %0,%1(13)" "lbz %0,%1(13)"
: "=r" (flags) : "=r" (flags)
: "i" (offsetof(struct paca_struct, soft_enabled))); : "i" (offsetof(struct paca_struct, irq_soft_mask)));
return flags; return flags;
} }
...@@ -64,18 +64,24 @@ static inline notrace unsigned long soft_enabled_return(void) ...@@ -64,18 +64,24 @@ static inline notrace unsigned long soft_enabled_return(void)
/* /*
* The "memory" clobber acts as both a compiler barrier * The "memory" clobber acts as both a compiler barrier
* for the critical section and as a clobber because * for the critical section and as a clobber because
* we changed paca->soft_enabled * we changed paca->irq_soft_mask
*/ */
static inline notrace void soft_enabled_set(unsigned long enable) static inline notrace void irq_soft_mask_set(unsigned long mask)
{ {
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
/* /*
* mask must always include LINUX bit if any are set, and * The irq mask must always include the STD bit if any are set.
* interrupts don't get replayed until the Linux interrupt is *
* unmasked. This could be changed to replay partial unmasks * and interrupts don't get replayed until the standard
* in future, which would allow Linux masks to nest inside * interrupt (local_irq_disable()) is unmasked.
* other masks, among other things. For now, be very dumb and *
* simple. * Other masks must only provide additional masking beyond
* the standard, and they are also not replayed until the
* standard interrupt becomes unmasked.
*
* This could be changed, but it will require partial
* unmasks to be replayed, among other things. For now, take
* the simple approach.
*/ */
WARN_ON(mask && !(mask & IRQS_DISABLED)); WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif #endif
...@@ -83,12 +89,12 @@ static inline notrace void soft_enabled_set(unsigned long enable) ...@@ -83,12 +89,12 @@ static inline notrace void soft_enabled_set(unsigned long enable)
asm volatile( asm volatile(
"stb %0,%1(13)" "stb %0,%1(13)"
: :
: "r" (enable), : "r" (mask),
"i" (offsetof(struct paca_struct, soft_enabled)) "i" (offsetof(struct paca_struct, irq_soft_mask))
: "memory"); : "memory");
} }
static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{ {
unsigned long flags; unsigned long flags;
...@@ -99,7 +105,7 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) ...@@ -99,7 +105,7 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
asm volatile( asm volatile(
"lbz %0,%1(13); stb %2,%1(13)" "lbz %0,%1(13); stb %2,%1(13)"
: "=&r" (flags) : "=&r" (flags)
: "i" (offsetof(struct paca_struct, soft_enabled)), : "i" (offsetof(struct paca_struct, irq_soft_mask)),
"r" (mask) "r" (mask)
: "memory"); : "memory");
...@@ -108,12 +114,12 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) ...@@ -108,12 +114,12 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_save_flags(void)
{ {
return soft_enabled_return(); return irq_soft_mask_return();
} }
static inline void arch_local_irq_disable(void) static inline void arch_local_irq_disable(void)
{ {
soft_enabled_set(IRQS_DISABLED); irq_soft_mask_set(IRQS_DISABLED);
} }
extern void arch_local_irq_restore(unsigned long); extern void arch_local_irq_restore(unsigned long);
...@@ -125,7 +131,7 @@ static inline void arch_local_irq_enable(void) ...@@ -125,7 +131,7 @@ static inline void arch_local_irq_enable(void)
static inline unsigned long arch_local_irq_save(void) static inline unsigned long arch_local_irq_save(void)
{ {
return soft_enabled_set_return(IRQS_DISABLED); return irq_soft_mask_set_return(IRQS_DISABLED);
} }
static inline bool arch_irqs_disabled_flags(unsigned long flags) static inline bool arch_irqs_disabled_flags(unsigned long flags)
...@@ -149,7 +155,7 @@ static inline bool arch_irqs_disabled(void) ...@@ -149,7 +155,7 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() do { \ #define hard_irq_disable() do { \
unsigned long flags; \ unsigned long flags; \
__hard_irq_disable(); \ __hard_irq_disable(); \
flags = soft_enabled_set_return(IRQS_DISABLED);\ flags = irq_soft_mask_set_return(IRQS_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \ if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \ trace_hardirqs_off(); \
......
...@@ -47,14 +47,14 @@ ...@@ -47,14 +47,14 @@
* be clobbered. * be clobbered.
*/ */
#define RECONCILE_IRQ_STATE(__rA, __rB) \ #define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \ lbz __rA,PACAIRQSOFTMASK(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \ lbz __rB,PACAIRQHAPPENED(r13); \
andi. __rA,__rA,IRQS_DISABLED;\ andi. __rA,__rA,IRQS_DISABLED; \
li __rA,IRQS_DISABLED; \ li __rA,IRQS_DISABLED; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \ ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \ stb __rB,PACAIRQHAPPENED(r13); \
bne 44f; \ bne 44f; \
stb __rA,PACASOFTIRQEN(r13); \ stb __rA,PACAIRQSOFTMASK(r13); \
TRACE_DISABLE_INTS; \ TRACE_DISABLE_INTS; \
44: 44:
...@@ -66,7 +66,7 @@ ...@@ -66,7 +66,7 @@
lbz __rA,PACAIRQHAPPENED(r13); \ lbz __rA,PACAIRQHAPPENED(r13); \
li __rB,IRQS_DISABLED; \ li __rB,IRQS_DISABLED; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \ ori __rA,__rA,PACA_IRQ_HARD_DIS; \
stb __rB,PACASOFTIRQEN(r13); \ stb __rB,PACAIRQSOFTMASK(r13); \
stb __rA,PACAIRQHAPPENED(r13) stb __rA,PACAIRQHAPPENED(r13)
#endif #endif
#endif #endif
......
...@@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void) ...@@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
/* Only need to enable IRQs by hard enabling them after this */ /* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0; local_paca->irq_happened = 0;
soft_enabled_set(IRQS_ENABLED); irq_soft_mask_set(IRQS_ENABLED);
#endif #endif
} }
......
...@@ -159,7 +159,7 @@ struct paca_struct { ...@@ -159,7 +159,7 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls or PM */ u64 saved_r1; /* r1 save for RTAS calls or PM */
u64 saved_msr; /* MSR saved here by enter_rtas */ u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */ u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */ u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */ u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */ u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
......
...@@ -178,7 +178,7 @@ int main(void) ...@@ -178,7 +178,7 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc); OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase); OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr); OFFSET(PACAKMSR, paca_struct, kernel_msr);
OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
#ifdef CONFIG_PPC_BOOK3S #ifdef CONFIG_PPC_BOOK3S
OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id); OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
......
...@@ -129,7 +129,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) ...@@ -129,7 +129,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
* is correct * is correct
*/ */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13) lbz r10,PACAIRQSOFTMASK(r13)
1: tdnei r10,IRQS_ENABLED 1: tdnei r10,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif #endif
...@@ -781,7 +781,7 @@ restore: ...@@ -781,7 +781,7 @@ restore:
* are about to re-enable interrupts * are about to re-enable interrupts
*/ */
ld r5,SOFTE(r1) ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13) lbz r6,PACAIRQSOFTMASK(r13)
andi. r5,r5,IRQS_DISABLED andi. r5,r5,IRQS_DISABLED
bne .Lrestore_irq_off bne .Lrestore_irq_off
...@@ -806,7 +806,7 @@ restore: ...@@ -806,7 +806,7 @@ restore:
.Lrestore_no_replay: .Lrestore_no_replay:
TRACE_ENABLE_INTS TRACE_ENABLE_INTS
li r0,IRQS_ENABLED li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13); stb r0,PACAIRQSOFTMASK(r13);
/* /*
* Final return path. BookE is handled in a different file * Final return path. BookE is handled in a different file
...@@ -913,8 +913,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ...@@ -913,8 +913,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1: 1:
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
/* The interrupt should not have soft enabled. */ /* The interrupt should not have soft enabled. */
lbz r7,PACASOFTIRQEN(r13) lbz r7,PACAIRQSOFTMASK(r13)
1: tdnei r7,IRQS_DISABLED 1: tdeqi r7,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif #endif
b .Ldo_restore b .Ldo_restore
...@@ -1034,7 +1034,7 @@ _GLOBAL(enter_rtas) ...@@ -1034,7 +1034,7 @@ _GLOBAL(enter_rtas)
/* There is no way it is acceptable to get here with interrupts enabled, /* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON * check it with the asm equivalent of WARN_ON
*/ */
lbz r0,PACASOFTIRQEN(r13) lbz r0,PACAIRQSOFTMASK(r13)
1: tdeqi r0,IRQS_ENABLED 1: tdeqi r0,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif #endif
......
...@@ -139,7 +139,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ...@@ -139,7 +139,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfspr r10,SPRN_ESR mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR) SPECIAL_EXC_STORE(r10,ESR)
lbz r10,PACASOFTIRQEN(r13) lbz r10,PACAIRQSOFTMASK(r13)
SPECIAL_EXC_STORE(r10,SOFTE) SPECIAL_EXC_STORE(r10,SOFTE)
ld r10,_NIP(r1) ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0) SPECIAL_EXC_STORE(r10,CSRR0)
...@@ -206,7 +206,7 @@ BEGIN_FTR_SECTION ...@@ -206,7 +206,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10 mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
lbz r6,PACASOFTIRQEN(r13) lbz r6,PACAIRQSOFTMASK(r13)
ld r5,SOFTE(r1) ld r5,SOFTE(r1)
/* Interrupts had better not already be enabled... */ /* Interrupts had better not already be enabled... */
...@@ -216,7 +216,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ...@@ -216,7 +216,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
bne 1f bne 1f
TRACE_ENABLE_INTS TRACE_ENABLE_INTS
stb r5,PACASOFTIRQEN(r13) stb r5,PACAIRQSOFTMASK(r13)
1: 1:
/* /*
* Restore PACAIRQHAPPENED rather than setting it based on * Restore PACAIRQHAPPENED rather than setting it based on
...@@ -351,7 +351,7 @@ ret_from_mc_except: ...@@ -351,7 +351,7 @@ ret_from_mc_except:
#define PROLOG_ADDITION_NONE_MC(n) #define PROLOG_ADDITION_NONE_MC(n)
#define PROLOG_ADDITION_MASKABLE_GEN(n) \ #define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n bne masked_interrupt_book3e_##n
...@@ -397,7 +397,7 @@ exc_##n##_common: \ ...@@ -397,7 +397,7 @@ exc_##n##_common: \
mfspr r8,SPRN_XER; /* save XER in stackframe */ \ mfspr r8,SPRN_XER; /* save XER in stackframe */ \
ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \ lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \
ld r12,exception_marker@toc(r2); \ ld r12,exception_marker@toc(r2); \
li r0,0; \ li r0,0; \
std r3,GPR10(r1); /* save r10 to stackframe */ \ std r3,GPR10(r1); /* save r10 to stackframe */ \
......
...@@ -766,7 +766,7 @@ _GLOBAL(pmac_secondary_start) ...@@ -766,7 +766,7 @@ _GLOBAL(pmac_secondary_start)
* in the PACA when doing hotplug) * in the PACA when doing hotplug)
*/ */
li r0,IRQS_DISABLED li r0,IRQS_DISABLED
stb r0,PACASOFTIRQEN(r13) stb r0,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13) stb r0,PACAIRQHAPPENED(r13)
...@@ -823,7 +823,7 @@ __secondary_start: ...@@ -823,7 +823,7 @@ __secondary_start:
* in the PACA when doing hotplug) * in the PACA when doing hotplug)
*/ */
li r7,IRQS_DISABLED li r7,IRQS_DISABLED
stb r7,PACASOFTIRQEN(r13) stb r7,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13) stb r0,PACAIRQHAPPENED(r13)
...@@ -990,7 +990,7 @@ start_here_common: ...@@ -990,7 +990,7 @@ start_here_common:
* in the PACA when doing hotplug) * in the PACA when doing hotplug)
*/ */
li r0,IRQS_DISABLED li r0,IRQS_DISABLED
stb r0,PACASOFTIRQEN(r13) stb r0,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13) stb r0,PACAIRQHAPPENED(r13)
......
...@@ -48,7 +48,7 @@ _GLOBAL(\name) ...@@ -48,7 +48,7 @@ _GLOBAL(\name)
addi r1,r1,128 addi r1,r1,128
#endif #endif
li r0,IRQS_ENABLED li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13) stb r0,PACAIRQSOFTMASK(r13)
/* Interrupts will make us return to LR, so get something we want /* Interrupts will make us return to LR, so get something we want
* in there * in there
......
...@@ -55,7 +55,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) ...@@ -55,7 +55,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
#endif /* CONFIG_TRACE_IRQFLAGS */ #endif /* CONFIG_TRACE_IRQFLAGS */
li r0,IRQS_ENABLED li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ stb r0,PACAIRQSOFTMASK(r13) /* we'll hard-enable shortly */
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
DSSALL DSSALL
sync sync
......
...@@ -225,22 +225,9 @@ notrace void arch_local_irq_restore(unsigned long mask) ...@@ -225,22 +225,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
unsigned int replay; unsigned int replay;
/* Write the new soft-enabled value */ /* Write the new soft-enabled value */
soft_enabled_set(mask); irq_soft_mask_set(mask);
if (mask) { if (mask)
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* mask must always include LINUX bit if any
* are set, and interrupts don't get replayed until
* the Linux interrupt is unmasked. This could be
* changed to replay partial unmasks in future,
* which would allow Linux masks to nest inside
* other masks, among other things. For now, be very
* dumb and simple.
*/
WARN_ON(!(mask & IRQS_DISABLED));
#endif
return; return;
}
/* /*
* From this point onward, we can take interrupts, preempt, * From this point onward, we can take interrupts, preempt,
...@@ -285,7 +272,7 @@ notrace void arch_local_irq_restore(unsigned long mask) ...@@ -285,7 +272,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
} }
#endif /* CONFIG_TRACE_IRQFLAGS */ #endif /* CONFIG_TRACE_IRQFLAGS */
soft_enabled_set(IRQS_DISABLED); irq_soft_mask_set(IRQS_DISABLED);
trace_hardirqs_off(); trace_hardirqs_off();
/* /*
...@@ -297,7 +284,7 @@ notrace void arch_local_irq_restore(unsigned long mask) ...@@ -297,7 +284,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
/* We can soft-enable now */ /* We can soft-enable now */
trace_hardirqs_on(); trace_hardirqs_on();
soft_enabled_set(IRQS_ENABLED); irq_soft_mask_set(IRQS_ENABLED);
/* /*
* And replay if we have to. This will return with interrupts * And replay if we have to. This will return with interrupts
...@@ -372,7 +359,7 @@ bool prep_irq_for_idle(void) ...@@ -372,7 +359,7 @@ bool prep_irq_for_idle(void)
* of entering the low power state. * of entering the low power state.
*/ */
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
soft_enabled_set(IRQS_ENABLED); irq_soft_mask_set(IRQS_ENABLED);
/* Tell the caller to enter the low power state */ /* Tell the caller to enter the low power state */
return true; return true;
......
...@@ -58,7 +58,7 @@ optprobe_template_entry: ...@@ -58,7 +58,7 @@ optprobe_template_entry:
std r5,_XER(r1) std r5,_XER(r1)
mfcr r5 mfcr r5
std r5,_CCR(r1) std r5,_CCR(r1)
lbz r5,PACASOFTIRQEN(r13) lbz r5,PACAIRQSOFTMASK(r13)
std r5,SOFTE(r1) std r5,SOFTE(r1)
/* /*
......
...@@ -285,7 +285,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) ...@@ -285,7 +285,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* /*
* softe copies paca->soft_enabled variable state. Since soft_enabled is * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
* no longer used as a flag, let's force user to always see the softe value as 1 * no longer used as a flag, let's force user to always see the softe value as 1
* which means interrupts are not soft disabled. * which means interrupts are not soft disabled.
*/ */
......
...@@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void) ...@@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void)
/* Allow percpu accesses to work until we setup percpu data */ /* Allow percpu accesses to work until we setup percpu data */
get_paca()->data_offset = 0; get_paca()->data_offset = 0;
/* Mark interrupts disabled in PACA */ /* Mark interrupts disabled in PACA */
soft_enabled_set(IRQS_DISABLED); irq_soft_mask_set(IRQS_DISABLED);
} }
static void __init configure_exceptions(void) static void __init configure_exceptions(void)
...@@ -352,7 +352,7 @@ void __init early_setup(unsigned long dt_ptr) ...@@ -352,7 +352,7 @@ void __init early_setup(unsigned long dt_ptr)
void early_setup_secondary(void) void early_setup_secondary(void)
{ {
/* Mark interrupts disabled in PACA */ /* Mark interrupts disabled in PACA */
soft_enabled_set(IRQS_DISABLED); irq_soft_mask_set(IRQS_DISABLED);
/* Initialize the hash table or TLB handling */ /* Initialize the hash table or TLB handling */
early_init_mmu_secondary(); early_init_mmu_secondary();
......
...@@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb) ...@@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void) void accumulate_stolen_time(void)
{ {
u64 sst, ust; u64 sst, ust;
unsigned long save_soft_enabled = soft_enabled_return(); unsigned long save_irq_soft_mask = irq_soft_mask_return();
struct cpu_accounting_data *acct = &local_paca->accounting; struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before /* We are called early in the exception entry, before
...@@ -253,7 +253,7 @@ void accumulate_stolen_time(void) ...@@ -253,7 +253,7 @@ void accumulate_stolen_time(void)
* needs to reflect that so various debug stuff doesn't * needs to reflect that so various debug stuff doesn't
* complain * complain
*/ */
soft_enabled_set(IRQS_DISABLED); irq_soft_mask_set(IRQS_DISABLED);
sst = scan_dispatch_log(acct->starttime_user); sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime); ust = scan_dispatch_log(acct->starttime);
...@@ -261,7 +261,7 @@ void accumulate_stolen_time(void) ...@@ -261,7 +261,7 @@ void accumulate_stolen_time(void)
acct->utime -= ust; acct->utime -= ust;
acct->steal_time += ust + sst; acct->steal_time += ust + sst;
soft_enabled_set(save_soft_enabled); irq_soft_mask_set(save_irq_soft_mask);
} }
static inline u64 calculate_stolen_time(u64 stop_tb) static inline u64 calculate_stolen_time(u64 stop_tb)
......
...@@ -3249,7 +3249,7 @@ kvmppc_bad_host_intr: ...@@ -3249,7 +3249,7 @@ kvmppc_bad_host_intr:
mfctr r4 mfctr r4
#endif #endif
mfxer r5 mfxer r5
lbz r6, PACASOFTIRQEN(r13) lbz r6, PACAIRQSOFTMASK(r13)
std r3, _LINK(r1) std r3, _LINK(r1)
std r4, _CTR(r1) std r4, _CTR(r1)
std r5, _XER(r1) std r5, _XER(r1)
......
...@@ -752,7 +752,7 @@ void flush_dcache_icache_hugepage(struct page *page) ...@@ -752,7 +752,7 @@ void flush_dcache_icache_hugepage(struct page *page)
* So long as we atomically load page table pointers we are safe against teardown, * So long as we atomically load page table pointers we are safe against teardown,
* we can follow the address down to the page and take a ref on it. * we can follow the address down to the page and take a ref on it.
* This function need to be called with interrupts disabled. We use this variant * This function need to be called with interrupts disabled. We use this variant
* when we have MSR[EE] = 0 but the paca->soft_enabled = IRQS_ENABLED * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
*/ */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *hpage_shift) bool *is_thp, unsigned *hpage_shift)
......
...@@ -1623,7 +1623,7 @@ static void excprint(struct pt_regs *fp) ...@@ -1623,7 +1623,7 @@ static void excprint(struct pt_regs *fp)
printf(" current = 0x%lx\n", current); printf(" current = 0x%lx\n", current);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n",
local_paca, local_paca->soft_enabled, local_paca->irq_happened); local_paca, local_paca->irq_soft_mask, local_paca->irq_happened);
#endif #endif
if (current) { if (current) {
printf(" pid = %ld, comm = %s\n", printf(" pid = %ld, comm = %s\n",
...@@ -2391,7 +2391,7 @@ static void dump_one_paca(int cpu) ...@@ -2391,7 +2391,7 @@ static void dump_one_paca(int cpu)
DUMP(p, stab_rr, "lx"); DUMP(p, stab_rr, "lx");
DUMP(p, saved_r1, "lx"); DUMP(p, saved_r1, "lx");
DUMP(p, trap_save, "x"); DUMP(p, trap_save, "x");
DUMP(p, soft_enabled, "x"); DUMP(p, irq_soft_mask, "x");
DUMP(p, irq_happened, "x"); DUMP(p, irq_happened, "x");
DUMP(p, io_sync, "x"); DUMP(p, io_sync, "x");
DUMP(p, irq_work_pending, "x"); DUMP(p, irq_work_pending, "x");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment