Commit ed6b676c authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86_64: Switch to the interrupt stack when running a softirq in local_bh_enable()

This avoids some potential stack overflows with very deep softirq callchains.
i386 does this too.

TOADD CFI annotation
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3829ee6b
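
The commit message above boils down to: run the softirq call chain on a dedicated per-CPU stack rather than on whatever kernel stack is current, so a deep chain cannot overflow the task's stack. As a purely editorial analogy (not part of the patch), the user-space sketch below uses the POSIX ucontext API to run a function on a caller-supplied stack; work_stack and deep_work are invented names.

/* Editorial illustration, not kernel code: run a function on a separate,
 * caller-provided stack, the way call_softirq hands __do_softirq the
 * per-CPU interrupt stack.  deep_work and work_stack are made-up names. */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, work_ctx;
static char work_stack[64 * 1024];		/* plays the "interrupt stack" */

static void deep_work(void)
{
	/* Stand-in for __do_softirq(): however deep this call chain gets,
	 * it only consumes work_stack, not the stack of the caller. */
	puts("running on the dedicated stack");
}

int main(void)
{
	getcontext(&work_ctx);
	work_ctx.uc_stack.ss_sp = work_stack;
	work_ctx.uc_stack.ss_size = sizeof(work_stack);
	work_ctx.uc_link = &main_ctx;		/* resume here when deep_work returns */
	makecontext(&work_ctx, deep_work, 0);

	swapcontext(&main_ctx, &work_ctx);	/* the call_softirq analogue */
	puts("back on the original stack");
	return 0;
}
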
@@ -918,3 +918,15 @@ ENTRY(machine_check)
 ENTRY(call_debug)
 	zeroentry do_call_debug
 
+ENTRY(call_softirq)
+	movq %gs:pda_irqstackptr,%rax
+	pushq %r15
+	movq %rsp,%r15
+	incl %gs:pda_irqcount
+	cmove %rax,%rsp
+	call __do_softirq
+	movq %r15,%rsp
+	decl %gs:pda_irqcount
+	popq %r15
+	ret
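
A note on the incl/cmove pair in the new call_softirq above (editorial, not part of the patch): incl sets ZF when the incremented %gs:pda_irqcount reaches zero, and cmove copies the interrupt-stack pointer into %rsp only in that case; since the per-CPU count is kept at -1 while the CPU is off the interrupt stack, only the outermost entry switches stacks and nested entries stay where they are. The small user-space model below restates just that decision; irqcount, enter() and leave() are invented names, not kernel symbols.

/* Editorial user-space model of the incl/cmove decision above. */
#include <stdio.h>

static int irqcount = -1;		/* models %gs:pda_irqcount: -1 = not on irq stack */

static const char *enter(void)
{
	/* incl %gs:pda_irqcount; cmove %rax,%rsp */
	return (++irqcount == 0) ? "switch to the interrupt stack"
				 : "stay on the current stack";
}

static void leave(void)
{
	irqcount--;			/* decl %gs:pda_irqcount */
}

int main(void)
{
	printf("outermost entry: %s\n", enter());	/* switches */
	printf("nested entry:    %s\n", enter());	/* does not */
	leave();
	leave();
	return 0;
}
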
@@ -135,3 +135,22 @@ void fixup_irqs(cpumask_t map)
 	local_irq_disable();
 }
 #endif
+
+extern void call_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	pending = local_softirq_pending();
+	/* Switch to interrupt stack */
+	if (pending)
+		call_softirq();
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(do_softirq);
@@ -57,4 +57,6 @@ int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+#define __ARCH_HAS_DO_SOFTIRQ 1
+
 #endif /* _ASM_IRQ_H */
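
The __ARCH_HAS_DO_SOFTIRQ define added above is how the architecture takes over do_softirq(): the generic softirq code compiles its own fallback only when the macro is absent, so the stack-switching version in this patch is used instead. A minimal stand-alone sketch of that conditional-compilation pattern (editorial paraphrase, not the kernel source):

/* Editorial sketch of the #ifndef __ARCH_HAS_DO_SOFTIRQ arrangement; the
 * function bodies are placeholders, not the kernel implementations. */
#include <stdio.h>

#define __ARCH_HAS_DO_SOFTIRQ 1	/* what the header hunk above now defines */

#ifndef __ARCH_HAS_DO_SOFTIRQ
static void do_softirq(void)		/* generic fallback: runs on the current stack */
{
	puts("generic do_softirq");
}
#else
static void do_softirq(void)		/* stands in for the arch override above */
{
	puts("arch do_softirq: switches to the interrupt stack");
}
#endif

int main(void)
{
	do_softirq();
	return 0;
}
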
@@ -86,7 +86,7 @@ asmlinkage void __do_softirq(void)
 	/* Reset the pending bitmask before enabling irqs */
 	local_softirq_pending() = 0;
 
-	local_irq_enable();
+	//local_irq_enable();
 
 	h = softirq_vec;
@@ -99,7 +99,7 @@ asmlinkage void __do_softirq(void)
 		pending >>= 1;
 	} while (pending);
 
-	local_irq_disable();
+	//local_irq_disable();
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)