Commit 3ac57d34 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] Make ksoftirqd a normal per-cpu variable.

This moves the ksoftirqd pointers out of the irq_stat struct, and uses a
normal per-cpu variable.  It's not that time critical, nor referenced in
assembler.  This moves us closer to making irq_stat a per-cpu variable.

Because some archs have hardcoded asm references to offsets in this
structure, I haven't touched non-x86.  The __ksoftirqd_task field is
unused in other archs, too.
parent 7b957b78
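
For orientation, a minimal sketch (editorial, not part of the patch) of the generic per-cpu pattern the commit adopts, built on the <linux/percpu.h> interfaces of that era; the two helper names here are illustrative only:

#include <linux/percpu.h>
#include <linux/sched.h>

/* One pointer per CPU, replacing a slot in the irq_stat[] array. */
static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/* This CPU's copy: callers must not be preemptible (e.g. irqs off). */
static struct task_struct *local_ksoftirqd(void)
{
	return __get_cpu_var(ksoftirqd);
}

/* Another CPU's copy, e.g. from a CPU-hotplug callback. */
static struct task_struct *remote_ksoftirqd(int cpu)
{
	return per_cpu(ksoftirqd, cpu);
}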
@@ -7,7 +7,6 @@
 typedef struct {
 	unsigned int __softirq_pending;
-	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
 	unsigned long idle_timestamp;
 	unsigned int __nmi_count;	/* arch dependent */
 	unsigned int apic_timer_irqs;	/* arch dependent */
...
@@ -29,8 +29,6 @@ extern irq_cpustat_t irq_stat[];	/* defined in asm/hardirq.h */
 /* arch independent irq_stat fields */
 #define softirq_pending(cpu)	__IRQ_STAT((cpu), __softirq_pending)
 #define local_softirq_pending()	softirq_pending(smp_processor_id())
-#define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)
-#define local_ksoftirqd_task()	ksoftirqd_task(smp_processor_id())
 
 /* arch dependent irq_stat fields */
 #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386 */
...
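
For context (reconstructed from memory of that era's headers, not part of this diff), __IRQ_STAT simply indexes the global per-CPU stats array:

/* Roughly what the removed macros expanded to: */
#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
/* i.e. ksoftirqd_task(cpu) was an open-coded per-cpu slot inside
 * irq_stat[]; DEFINE_PER_CPU moves that pointer into the dedicated
 * per-cpu data section instead. */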
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/notifier.h>
+#include <linux/percpu.h>
 #include <linux/cpu.h>
 
 /*
@@ -41,15 +42,18 @@ EXPORT_SYMBOL(irq_stat);
 static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
 
+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(unsigned cpu)
+static inline void wakeup_softirqd(void)
 {
-	struct task_struct * tsk = ksoftirqd_task(cpu);
+	/* Interrupts are disabled: no need to stop preemption */
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
 
 	if (tsk && tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
@@ -96,7 +100,7 @@ asmlinkage void do_softirq(void)
 		goto restart;
 	}
 
 	if (pending)
-		wakeup_softirqd(smp_processor_id());
+		wakeup_softirqd();
 	__local_bh_enable();
 }
@@ -131,7 +135,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
 	 * schedule the softirq soon.
 	 */
 	if (!in_interrupt())
-		wakeup_softirqd(cpu);
+		wakeup_softirqd();
 }
 
 void raise_softirq(unsigned int nr)
@@ -325,7 +329,7 @@ static int ksoftirqd(void * __bind_cpu)
 	__set_current_state(TASK_INTERRUPTIBLE);
 	mb();
 
-	local_ksoftirqd_task() = current;
+	__get_cpu_var(ksoftirqd) = current;
 
 	for (;;) {
 		if (!local_softirq_pending())
@@ -354,7 +358,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 			return NOTIFY_BAD;
 		}
 
-		while (!ksoftirqd_task(hotcpu))
+		while (!per_cpu(ksoftirqd, hotcpu))
 			yield();
 	}
 	return NOTIFY_OK;
...
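
One design note on the new wakeup_softirqd(): it can use __get_cpu_var() with no explicit preemption guard because, as the added comment says, every caller already runs with interrupts disabled. A hedged sketch of what the same lookup would need from a preemptible context (this function is hypothetical, not in the patch):

static void wakeup_softirqd_preemptible(void)
{
	int cpu = get_cpu();	/* disables preemption */
	struct task_struct *tsk = per_cpu(ksoftirqd, cpu);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
	put_cpu();		/* re-enables preemption */
}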