Commit 211b2fce authored by Rusty Russell's avatar Rusty Russell Committed by Linus Torvalds

[PATCH] Hotplug CPUs: Read Copy Update Changes

Add hook for RCU to handle jobs on dead cpu.  Requires new
tasklet_kill_immediate for RCU to clean up its tasklet (which might
have been about to run, so tasklet_kill won't work).
parent eac9256c
......@@ -211,6 +211,7 @@ static inline void tasklet_hi_enable(struct tasklet_struct *t)
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
......
......@@ -110,6 +110,7 @@ static void rcu_start_batch(long newbatch)
!cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
return;
}
/* Can't change, since spin lock held. */
rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
}
......@@ -154,6 +155,60 @@ static void rcu_check_quiescent_state(void)
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Drain @list onto the current CPU's "next" RCU batch list.
 *
 * Helper for rcu_offline_cpu() only — do not use elsewhere without
 * reviewing the locking requirements: the source list must belong to a
 * CPU that is already dead and therefore not processing interrupts.
 * Local interrupts are disabled while splicing so this CPU's own RCU
 * tasklet cannot touch the destination list concurrently.
 */
static void rcu_move_batch(struct list_head *list)
{
	struct list_head *node;
	int this_cpu = smp_processor_id();

	local_irq_disable();
	for (node = list->next; node != list; node = list->next) {
		list_del(node);
		list_add_tail(node, &RCU_nxtlist(this_cpu));
	}
	local_irq_enable();
}
/*
 * Handle CPU_DEAD for RCU: @cpu has gone fully offline.
 *
 * If the dead CPU still owed the current grace period a quiescent
 * state, report it on its behalf here — otherwise the grace period
 * (and everyone queued behind it) could block indefinitely.  Then
 * migrate the dead CPU's pending callbacks to the current CPU and
 * remove its tasklet, which may still be queued.
 *
 * Fix vs. original: test the cpumask with cpus_empty() instead of
 * `!rcu_ctrlblk.rcu_cpu_mask`.  The `!` form treats cpumask_t as a
 * scalar, which only compiles when cpumask_t happens to be an integer
 * typedef (NR_CPUS <= BITS_PER_LONG) and breaks for the struct/bitmap
 * representation; it is also inconsistent with the cpus_empty() calls
 * used on this same field elsewhere in this file.
 */
static void rcu_offline_cpu(int cpu)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_irq(&rcu_ctrlblk.mutex);
	if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
		goto unlock;

	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
	if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
		/* The dead CPU was the last holdout: close this batch. */
		rcu_ctrlblk.curbatch++;
		/* We may avoid calling start batch if
		 * we are starting the batch only
		 * because of the DEAD CPU (the current
		 * CPU will start a new batch anyway for
		 * the callbacks we will move to current CPU).
		 * However, we will avoid this optimisation
		 * for now.
		 */
		rcu_start_batch(rcu_ctrlblk.maxbatch);
	}
unlock:
	spin_unlock_irq(&rcu_ctrlblk.mutex);

	/* The dead CPU takes no interrupts any more, so its lists may be
	 * pulled from without its locks (see rcu_move_batch()). */
	rcu_move_batch(&RCU_curlist(cpu));
	rcu_move_batch(&RCU_nxtlist(cpu));

	/* The dead CPU's RCU tasklet may still be in SCHED state. */
	tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
}
#endif
/*
* This does the RCU processing work from tasklet context.
*/
......@@ -214,7 +269,11 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
case CPU_UP_PREPARE:
rcu_online_cpu(cpu);
break;
/* Space reserved for CPU_OFFLINE :) */
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
rcu_offline_cpu(cpu);
break;
#endif
default:
break;
}
......
......@@ -333,6 +333,37 @@ static int ksoftirqd(void * __bind_cpu)
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate - unlink a tasklet from a dead CPU's queue
 * @t: tasklet to remove
 * @cpu: the (dead) CPU whose tasklet list may still hold @t
 *
 * Unlike tasklet_kill(), this removes the tasklet _immediately_, even
 * while it sits in TASKLET_STATE_SCHED state (i.e. already queued for
 * execution).
 *
 * @cpu must be in the CPU_DEAD state when this is called: a dead CPU
 * processes no interrupts, so its per-cpu tasklet list can be walked
 * without locking.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **pprev;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	pprev = &per_cpu(tasklet_vec, cpu).list;
	while (*pprev) {
		if (*pprev == t) {
			*pprev = t->next;
			return;
		}
		pprev = &(*pprev)->next;
	}
	/* SCHED bit set but not found on the list: corrupted state. */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment