Commit f2c50921 authored by Sebastian Andrzej Siewior, committed by Arnd Bergmann

arch/*: Disable softirq stacks on PREEMPT_RT.

PREEMPT_RT preempts softirqs, and the current implementation avoids
do_softirq_own_stack(), using only __do_softirq().

Disable the unused softirq stacks on PREEMPT_RT to save some memory and
to ensure that do_softirq_own_stack() is not used, because it is not expected to be.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parent b13baccc
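
For context before the per-arch hunks: the generic header is what makes the arch implementations optional. Below is a minimal sketch of include/asm-generic/softirq_stack.h as it looks after this change; the #else body is the long-standing generic fallback, which the truncated hunk at the end of this diff elides, so treat it as an assumption here.

/* Sketch: include/asm-generic/softirq_stack.h after this commit.
 * With PREEMPT_RT enabled, do_softirq_own_stack() collapses to a plain
 * inline __do_softirq() call, so the per-arch stack-switching variants
 * below become unreachable and their dedicated stacks would only waste
 * memory.
 */
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H

#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
        __do_softirq();  /* assumed: the existing generic fallback body */
}
#endif

#endif /* __ASM_GENERIC_SOFTIRQ_STACK_H */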
arch/arm/kernel/irq.c
@@ -70,6 +70,7 @@ static void __init init_irq_stacks(void)
 	}
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void ____do_softirq(void *arg)
 {
 	__do_softirq();
@@ -80,7 +81,7 @@ void do_softirq_own_stack(void)
 	call_with_stack(____do_softirq, NULL,
 			__this_cpu_read(irq_stack_ptr));
 }
-
+#endif
 #endif
 
 int arch_show_interrupts(struct seq_file *p, int prec)
arch/parisc/kernel/irq.c
@@ -480,10 +480,12 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	*irq_stack_in_use = 1;
 }
 
+#ifndef CONFIG_PREEMPT_RT
 void do_softirq_own_stack(void)
 {
 	execute_on_irq_stack(__do_softirq, 0);
 }
+#endif
 #endif /* CONFIG_IRQSTACKS */
 
 /* ONLY called from entry.S:intr_extint() */
arch/powerpc/kernel/irq.c
@@ -611,6 +611,7 @@ static inline void check_stack_overflow(void)
 	}
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static __always_inline void call_do_softirq(const void *sp)
 {
 	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -629,6 +630,7 @@ static __always_inline void call_do_softirq(const void *sp)
 		   "r11", "r12"
 	);
 }
+#endif
 
 static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
 {
@@ -747,10 +749,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void *softirq_ctx[NR_CPUS] __read_mostly;
 void *hardirq_ctx[NR_CPUS] __read_mostly;
 
+#ifndef CONFIG_PREEMPT_RT
 void do_softirq_own_stack(void)
 {
 	call_do_softirq(softirq_ctx[smp_processor_id()]);
 }
+#endif
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
arch/s390/include/asm/softirq_stack.h
@@ -5,9 +5,10 @@
 #include <asm/lowcore.h>
 #include <asm/stacktrace.h>
 
+#ifndef CONFIG_PREEMPT_RT
 static inline void do_softirq_own_stack(void)
 {
 	call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
 }
-
+#endif
 #endif /* __ASM_S390_SOFTIRQ_STACK_H */
arch/sh/kernel/irq.c
@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
+#ifndef CONFIG_PREEMPT_RT
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curctx;
@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
 		   "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
 	);
 }
+#endif
 #else
 static inline void handle_one_irq(unsigned int irq)
 {
arch/sparc/kernel/irq_64.c
@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 void do_softirq_own_stack(void)
 {
 	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
 	__asm__ __volatile__("mov %0, %%sp"
 			     : : "r" (orig_sp));
 }
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
include/asm-generic/softirq_stack.h
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
 #define __ASM_GENERIC_SOFTIRQ_STACK_H
 
-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)
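
For reference, a condensed sketch of the non-RT caller in kernel/softirq.c (simplified; exact guards vary by kernel version). It illustrates why the guarded arch code above is never reached on PREEMPT_RT: once the generic header stops declaring the own-stack variant, the only call site resolves to the inline stub.

/* Condensed sketch of the caller in kernel/softirq.c (non-RT path).
 * On a PREEMPT_RT build, do_softirq_own_stack() here is the inline
 * __do_softirq() fallback from the generic header above, so no arch
 * stack switch ever happens.
 */
asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();
        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}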