Commit f0f4d6e4 authored by Dipankar Sarma, committed by Linus Torvalds

[PATCH] RCU - cpu offline fix

This fixes the RCU cpu offline code, which was broken by the singly-linked RCU
changes.  Nathan pointed out the problems and submitted a patch for this.
This is an optimal fix: there is no need to iterate through the list of
callbacks; just use the tail pointers and attach the whole list from the dead
cpu in one step.
Signed-off-by: Nathan Lynch <nathanl@austin.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bcce6313
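
The fix relies on keeping a tail pointer-to-pointer alongside each singly-linked
callback list, so an entire batch can be spliced onto another cpu's list in O(1)
instead of walking every callback.  The following stand-alone user-space sketch
illustrates that splice technique under simplified, illustrative names (cb,
cb_list, splice_batch, cb_enqueue); it is a model of what rcu_move_batch() does
after this patch, not the kernel implementation itself.

/*
 * Minimal user-space sketch (not kernel code) of the tail-pointer splice:
 * each list keeps a "tail" pointer-to-pointer that always addresses the
 * terminating ->next slot, so attaching a whole batch is O(1).
 */
#include <stdio.h>

struct cb {
	struct cb *next;
	const char *name;
};

struct cb_list {
	struct cb *head;	/* first callback, or NULL                */
	struct cb **tail;	/* points at the NULL-terminating slot    */
};

static void cb_list_init(struct cb_list *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

static void cb_enqueue(struct cb_list *l, struct cb *c)
{
	c->next = NULL;
	*l->tail = c;		/* append at the current tail slot */
	l->tail = &c->next;	/* list now ends at the new node   */
}

/* Mirrors the fixed rcu_move_batch(): attach the batch, adopt its tail. */
static void splice_batch(struct cb_list *dst, struct cb *list, struct cb **tail)
{
	*dst->tail = list;	/* attach the whole batch (may be NULL)   */
	if (list)
		dst->tail = tail;	/* adopt the donor's tail slot     */
}

int main(void)
{
	struct cb a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };
	struct cb_list live, dead;
	struct cb *p;

	cb_list_init(&live);
	cb_list_init(&dead);

	cb_enqueue(&live, &a);
	cb_enqueue(&dead, &b);
	cb_enqueue(&dead, &c);

	/* "cpu offline": hand the dead cpu's pending callbacks to a live one. */
	splice_batch(&live, dead.head, dead.tail);
	cb_list_init(&dead);	/* donor list must be reset: head NULL, tail back at head */

	for (p = live.head; p; p = p->next)
		printf("%s\n", p->name);	/* prints a, b, c */
	return 0;
}
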
@@ -98,6 +98,7 @@ struct rcu_data {
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail;
 	struct rcu_head *curlist;
+	struct rcu_head **curtail;
 };
 
 DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -111,6 +112,7 @@ extern struct rcu_ctrlblk rcu_ctrlblk;
 #define RCU_nxtlist(cpu)	(per_cpu(rcu_data, (cpu)).nxtlist)
 #define RCU_curlist(cpu)	(per_cpu(rcu_data, (cpu)).curlist)
 #define RCU_nxttail(cpu)	(per_cpu(rcu_data, (cpu)).nxttail)
+#define RCU_curtail(cpu)	(per_cpu(rcu_data, (cpu)).curtail)
 
 static inline int rcu_pending(int cpu)
 {
@@ -210,19 +210,15 @@ static void rcu_check_quiescent_state(void)
  * locking requirements, the list it's pulling from has to belong to a cpu
  * which is dead and hence not processing interrupts.
  */
-static void rcu_move_batch(struct rcu_head *list)
+static void rcu_move_batch(struct rcu_head *list, struct rcu_head **tail)
 {
 	int cpu;
 
 	local_irq_disable();
 	cpu = smp_processor_id();
-	while (list != NULL) {
-		*RCU_nxttail(cpu) = list;
-		RCU_nxttail(cpu) = &list->next;
-		list = list->next;
-	}
+	*RCU_nxttail(cpu) = list;
+	if (list)
+		RCU_nxttail(cpu) = tail;
 	local_irq_enable();
 }
@@ -237,8 +233,8 @@ static void rcu_offline_cpu(int cpu)
 	cpu_quiet(cpu);
 	spin_unlock_bh(&rcu_state.mutex);
 
-	rcu_move_batch(RCU_curlist(cpu));
-	rcu_move_batch(RCU_nxtlist(cpu));
+	rcu_move_batch(RCU_curlist(cpu), RCU_curtail(cpu));
+	rcu_move_batch(RCU_nxtlist(cpu), RCU_nxttail(cpu));
 	tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
 }
@@ -271,6 +267,7 @@ static void rcu_process_callbacks(unsigned long unused)
 	    !rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
 		rcu_list = RCU_curlist(cpu);
 		RCU_curlist(cpu) = NULL;
+		RCU_curtail(cpu) = &RCU_curlist(cpu);
 	}
 
 	local_irq_disable();
@@ -278,6 +275,7 @@ static void rcu_process_callbacks(unsigned long unused)
 		int next_pending, seq;
 
 		RCU_curlist(cpu) = RCU_nxtlist(cpu);
+		RCU_curtail(cpu) = RCU_nxttail(cpu);
 		RCU_nxtlist(cpu) = NULL;
 		RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
 		local_irq_enable();
@@ -319,6 +317,7 @@ static void __devinit rcu_online_cpu(int cpu)
 {
 	memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
 	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
+	RCU_curtail(cpu) = &RCU_curlist(cpu);
 	RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
 	RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
 	RCU_qs_pending(cpu) = 0;