Commit f1ba9a5b authored by Christoph Hellwig, committed by Benjamin Herrenschmidt

powerpc: Unconditionally enable irq stacks

Irq stacks provide an essential protection from stack overflows through
external interrupts, at the cost of two additional stacks per CPU.

Enable them unconditionally to simplify the kernel build and prevent
people from accidentally disabling them.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent b636f137
...@@ -135,13 +135,6 @@ config DEBUGGER ...@@ -135,13 +135,6 @@ config DEBUGGER
depends on KGDB || XMON depends on KGDB || XMON
default y default y
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
config VIRQ_DEBUG config VIRQ_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs" bool "Expose hardware/virtual IRQ mapping via debugfs"
depends on DEBUG_FS depends on DEBUG_FS
......
...@@ -358,7 +358,6 @@ extern void exc_lvl_ctx_init(void); ...@@ -358,7 +358,6 @@ extern void exc_lvl_ctx_init(void);
#define exc_lvl_ctx_init() #define exc_lvl_ctx_init()
#endif #endif
#ifdef CONFIG_IRQSTACKS
/* /*
* Per-cpu stacks for handling hard and soft interrupts. * Per-cpu stacks for handling hard and soft interrupts.
*/ */
...@@ -369,11 +368,6 @@ extern void irq_ctx_init(void); ...@@ -369,11 +368,6 @@ extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp); extern void call_do_softirq(struct thread_info *tp);
extern int call_handle_irq(int irq, void *p1, extern int call_handle_irq(int irq, void *p1,
struct thread_info *tp, void *func); struct thread_info *tp, void *func);
#else
#define irq_ctx_init()
#endif /* CONFIG_IRQSTACKS */
extern void do_IRQ(struct pt_regs *regs); extern void do_IRQ(struct pt_regs *regs);
#endif /* _ASM_IRQ_H */ #endif /* _ASM_IRQ_H */
......
...@@ -317,7 +317,6 @@ void fixup_irqs(const struct cpumask *map) ...@@ -317,7 +317,6 @@ void fixup_irqs(const struct cpumask *map)
} }
#endif #endif
#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq) static inline void handle_one_irq(unsigned int irq)
{ {
struct thread_info *curtp, *irqtp; struct thread_info *curtp, *irqtp;
...@@ -358,12 +357,6 @@ static inline void handle_one_irq(unsigned int irq) ...@@ -358,12 +357,6 @@ static inline void handle_one_irq(unsigned int irq)
if (irqtp->flags) if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags); set_bits(irqtp->flags, &curtp->flags);
} }
#else
static inline void handle_one_irq(unsigned int irq)
{
generic_handle_irq(irq);
}
#endif
static inline void check_stack_overflow(void) static inline void check_stack_overflow(void)
{ {
...@@ -455,7 +448,6 @@ void exc_lvl_ctx_init(void) ...@@ -455,7 +448,6 @@ void exc_lvl_ctx_init(void)
} }
#endif #endif
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
...@@ -492,10 +484,6 @@ static inline void do_softirq_onstack(void) ...@@ -492,10 +484,6 @@ static inline void do_softirq_onstack(void)
irqtp->task = NULL; irqtp->task = NULL;
} }
#else
#define do_softirq_onstack() __do_softirq()
#endif /* CONFIG_IRQSTACKS */
void do_softirq(void) void do_softirq(void)
{ {
unsigned long flags; unsigned long flags;
......
...@@ -33,7 +33,6 @@ ...@@ -33,7 +33,6 @@
.text .text
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq) _GLOBAL(call_do_softirq)
mflr r0 mflr r0
stw r0,4(r1) stw r0,4(r1)
...@@ -56,7 +55,6 @@ _GLOBAL(call_handle_irq) ...@@ -56,7 +55,6 @@ _GLOBAL(call_handle_irq)
lwz r0,4(r1) lwz r0,4(r1)
mtlr r0 mtlr r0
blr blr
#endif /* CONFIG_IRQSTACKS */
/* /*
* This returns the high 64 bits of the product of two 64-bit numbers. * This returns the high 64 bits of the product of two 64-bit numbers.
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
.text .text
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq) _GLOBAL(call_do_softirq)
mflr r0 mflr r0
std r0,16(r1) std r0,16(r1)
...@@ -52,7 +51,6 @@ _GLOBAL(call_handle_irq) ...@@ -52,7 +51,6 @@ _GLOBAL(call_handle_irq)
ld r0,16(r1) ld r0,16(r1)
mtlr r0 mtlr r0
blr blr
#endif /* CONFIG_IRQSTACKS */
.section ".toc","aw" .section ".toc","aw"
PPC64_CACHES: PPC64_CACHES:
......
...@@ -1005,7 +1005,6 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, ...@@ -1005,7 +1005,6 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
return error; return error;
} }
#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes) unsigned long nbytes)
{ {
...@@ -1030,10 +1029,6 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, ...@@ -1030,10 +1029,6 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
return 0; return 0;
} }
#else
#define valid_irq_stack(sp, p, nb) 0
#endif /* CONFIG_IRQSTACKS */
int validate_sp(unsigned long sp, struct task_struct *p, int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes) unsigned long nbytes)
{ {
......
...@@ -241,7 +241,6 @@ int __init ppc_init(void) ...@@ -241,7 +241,6 @@ int __init ppc_init(void)
arch_initcall(ppc_init); arch_initcall(ppc_init);
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void) static void __init irqstack_early_init(void)
{ {
unsigned int i; unsigned int i;
...@@ -255,9 +254,6 @@ static void __init irqstack_early_init(void) ...@@ -255,9 +254,6 @@ static void __init irqstack_early_init(void)
__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
} }
} }
#else
#define irqstack_early_init()
#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
static void __init exc_lvl_early_init(void) static void __init exc_lvl_early_init(void)
......
...@@ -432,7 +432,6 @@ static u64 slb0_limit(void) ...@@ -432,7 +432,6 @@ static u64 slb0_limit(void)
return 1UL << SID_SHIFT; return 1UL << SID_SHIFT;
} }
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void) static void __init irqstack_early_init(void)
{ {
u64 limit = slb0_limit(); u64 limit = slb0_limit();
...@@ -451,9 +450,6 @@ static void __init irqstack_early_init(void) ...@@ -451,9 +450,6 @@ static void __init irqstack_early_init(void)
THREAD_SIZE, limit)); THREAD_SIZE, limit));
} }
} }
#else
#define irqstack_early_init()
#endif
#ifdef CONFIG_PPC_BOOK3E #ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void) static void __init exc_lvl_early_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment