Commit d6714c22 authored by Paul E. McKenney, committed by Ingo Molnar

rcu: Renamings to increase RCU clarity

Make RCU-sched, RCU-bh, and RCU-preempt be underlying
implementations, with "RCU" defined in terms of one of the
three.  Update the outdated rcu_qsctr_inc() names, as these
functions no longer increment anything.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <12509746132696-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9f77da9f
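
In short: the per-flavor quiescent-state hooks now record a flag rather
than increment a counter (hence the renames), and the generic "RCU" API
is layered on top of an underlying flavor.  A minimal sketch of what
callers see after this commit (an illustrative summary, not part of the
patch itself):

	/* Renamed quiescent-state hooks: */
	void rcu_sched_qs(int cpu);     /* was rcu_qsctr_inc(cpu)    */
	void rcu_bh_qs(int cpu);        /* was rcu_bh_qsctr_inc(cpu) */

	/*
	 * "RCU" defined in terms of an underlying implementation:
	 * on TREE_RCU, call_rcu() now simply forwards to RCU-sched.
	 */
	void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
	{
		call_rcu_sched(head, func);
	}
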
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -191,8 +191,7 @@ rcu/rcuhier (which displays the struct rcu_node hierarchy).
 The output of "cat rcu/rcudata" looks as follows:
 
-rcu:
-rcu:
+rcu_sched:
   0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10
   1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10
   2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10
@@ -306,7 +305,7 @@ comma-separated-variable spreadsheet format.
 The output of "cat rcu/rcugp" looks as follows:
 
-rcu: completed=33062 gpnum=33063
+rcu_sched: completed=33062 gpnum=33063
 rcu_bh: completed=464 gpnum=464
 
 Again, this output is for both "rcu" and "rcu_bh".  The fields are
@@ -413,7 +412,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
 The output of "cat rcu/rcu_pending" looks as follows:
 
-rcu:
+rcu_sched:
 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -157,17 +157,28 @@ extern int rcu_scheduler_active;
  * - call_rcu_sched() and rcu_barrier_sched()
  *   on the write-side to insure proper synchronization.
  */
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+	preempt_disable();
+}
+static inline void rcu_read_lock_sched_notrace(void)
+{
+	preempt_disable_notrace();
+}
 
 /*
  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
  *
  * See rcu_read_lock_sched for more information.
  */
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
+static inline void rcu_read_unlock_sched(void)
+{
+	preempt_enable();
+}
+static inline void rcu_read_unlock_sched_notrace(void)
+{
+	preempt_enable_notrace();
+}
 
 /**
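
Callers of the converted read-side primitives are unaffected: the macros
became inline functions with identical semantics.  A minimal RCU-sched
reader, as a sketch (the protected pointer 'gp' and the helper
'do_something_with()' are hypothetical):

	struct foo *p;

	rcu_read_lock_sched();          /* now inline, wraps preempt_disable() */
	p = rcu_dereference(gp);        /* fetch the RCU-protected pointer */
	if (p != NULL)
		do_something_with(p);
	rcu_read_unlock_sched();        /* inline, wraps preempt_enable() */
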
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,8 +40,8 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
+extern void rcu_sched_qs(int cpu);
+static inline void rcu_bh_qs(int cpu) { }
 
 /*
  * Someone might want to pass call_rcu_bh as a function pointer.
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,8 +30,8 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
@@ -73,7 +73,8 @@ static inline void __rcu_read_unlock_bh(void)
 
 #define __synchronize_sched() synchronize_rcu()
 
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *rcu));
 
 static inline void synchronize_rcu_expedited(void)
 {
@@ -91,6 +92,7 @@ extern void rcu_restart_cpu(int cpu);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
 
 static inline void rcu_init_sched(void)
 {
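
Turning call_rcu_sched() from a macro alias into a real extern function
also gives it an address, so it can be stored in a function pointer and
selected at run time, which a macro cannot be.  A one-line sketch:

	/* Legal only now that call_rcu_sched() is a real function: */
	void (*queue_cb)(struct rcu_head *, void (*)(struct rcu_head *))
		= call_rcu_sched;
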
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched
 	.dynticks = 1,
 };
 
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
@@ -967,12 +967,12 @@ void rcu_check_callbacks(int cpu, int user)
 	 * If this CPU took its interrupt from user mode or from the
 	 * idle loop, and this is not a nested interrupt, then
 	 * this CPU has to have exited all prior preept-disable
-	 * sections of code.  So increment the counter to note this.
+	 * sections of code.  So invoke rcu_sched_qs() to note this.
 	 *
 	 * The memory barrier is needed to handle the case where
 	 * writes from a preempt-disable section of code get reordered
 	 * into schedule() by this CPU's write buffer.  So the memory
-	 * barrier makes sure that the rcu_qsctr_inc() is seen by other
+	 * barrier makes sure that the rcu_sched_qs() is seen by other
 	 * CPUs to happen after any such write.
 	 */
@@ -980,7 +980,7 @@ void rcu_check_callbacks(int cpu, int user)
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 		smp_mb();  /* Guard against aggressive schedule(). */
-		rcu_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
 	}
 	rcu_check_mb(cpu);
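
The comment in the hunk above is the key to the smp_mb(): without it, a
store performed inside a preempt-disabled (RCU-sched read-side) region
could be reordered past the quiescent-state report by the CPU's write
buffer, letting other CPUs observe the quiescent state before the store.
Schematically (a sketch, not kernel code; 'shared' is hypothetical):

	preempt_disable();
	shared->flag = 1;       /* store inside a read-side section */
	preempt_enable();
	/* ... interrupt taken from user mode or the idle loop ... */
	smp_mb();               /* order the store before ...       */
	rcu_sched_qs(cpu);      /* ... the quiescent-state report   */
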
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -74,26 +74,25 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.n_force_qs_ngp = 0, \
 }
 
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 /*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
  */
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
 }
 
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 	rdp->passed_quiesc = 1;
@@ -113,12 +112,22 @@ static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 
+/*
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed_sched(void)
+{
+	return rcu_sched_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
 /*
  * Return the number of RCU batches processed thus far for debug & stats.
+ * @@@ placeholder, maps to rcu_batches_completed_sched().
  */
 long rcu_batches_completed(void)
 {
-	return rcu_state.completed;
+	return rcu_batches_completed_sched();
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
@@ -310,7 +319,7 @@ void rcu_irq_exit(void)
 	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_data).nxtlist ||
+	if (__get_cpu_var(rcu_sched_data).nxtlist ||
 	    __get_cpu_var(rcu_bh_data).nxtlist)
 		set_need_resched();
 }
@@ -847,7 +856,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access it rcu_data structure.
+	 * (uncharacteristically) safe to access its rcu_data structure.
 	 * Note also that we must carefully retain the order of the
 	 * outgoing CPU's callbacks in order for rcu_barrier() to work
 	 * correctly.  Finally, note that we start all the callbacks
@@ -878,7 +887,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  */
 static void rcu_offline_cpu(int cpu)
 {
-	__rcu_offline_cpu(cpu, &rcu_state);
+	__rcu_offline_cpu(cpu, &rcu_sched_state);
 	__rcu_offline_cpu(cpu, &rcu_bh_state);
 }
 
@@ -973,17 +982,16 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU took its interrupt from user
 		 * mode or from the idle loop, and if this is not a
 		 * nested interrupt.  In this case, the CPU is in
-		 * a quiescent state, so count it.
+		 * a quiescent state, so note it.
 		 *
 		 * No memory barrier is required here because both
-		 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
-		 * only CPU-local variables that other CPUs neither
-		 * access nor modify, at least not while the corresponding
-		 * CPU is online.
+		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+		 * variables that other CPUs neither access nor modify,
+		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_qsctr_inc(cpu);
-		rcu_bh_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
+		rcu_bh_qs(cpu);
 
 	} else if (!in_softirq()) {
@@ -991,10 +999,10 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU did not take its interrupt from
 		 * softirq, in other words, if it is not interrupting
 		 * a rcu_bh read-side critical section.  This is an _bh
-		 * critical section, so count it.
+		 * critical section, so note it.
 		 */
 
-		rcu_bh_qsctr_inc(cpu);
+		rcu_bh_qs(cpu);
 	}
 	raise_softirq(RCU_SOFTIRQ);
 }
@@ -1174,7 +1182,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 */
 	smp_mb(); /* See above block comment. */
 
-	__rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_sched_state,
+				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 
 	/*
@@ -1231,14 +1240,25 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 }
 
 /*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
+ */
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+	__call_rcu(head, func, &rcu_sched_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu_sched);
+
+/*
+ * @@@ Queue an RCU callback for invocation after a grace period.
+ * @@@ Placeholder pending rcutree_plugin.h.
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-	__call_rcu(head, func, &rcu_state);
+	call_rcu_sched(head, func);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
  * Queue an RCU for invocation after a quicker grace period.
  */
@@ -1311,7 +1331,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 int rcu_pending(int cpu)
 {
-	return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
+	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
 	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
 }
 
@@ -1324,7 +1344,7 @@ int rcu_pending(int cpu)
 int rcu_needs_cpu(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
-	return per_cpu(rcu_data, cpu).nxtlist ||
+	return per_cpu(rcu_sched_data, cpu).nxtlist ||
 	       per_cpu(rcu_bh_data, cpu).nxtlist;
 }
 
@@ -1418,7 +1438,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-	rcu_init_percpu_data(cpu, &rcu_state);
+	rcu_init_percpu_data(cpu, &rcu_sched_state);
 	rcu_init_percpu_data(cpu, &rcu_bh_state);
 }
 
@@ -1545,10 +1565,10 @@ void __init __rcu_init(void)
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-	rcu_init_one(&rcu_state);
-	RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
+	rcu_init_one(&rcu_sched_state);
+	RCU_DATA_PTR_INIT(&rcu_sched_state, rcu_sched_data);
 	for_each_possible_cpu(i)
-		rcu_boot_init_percpu_data(i, &rcu_state);
+		rcu_boot_init_percpu_data(i, &rcu_sched_state);
 	rcu_init_one(&rcu_bh_state);
 	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
 	for_each_possible_cpu(i)
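
With call_rcu_sched() now a first-class exported primitive, updaters
defer reclamation against preempt-disabled readers in the usual way.
A self-contained usage sketch ('struct foo' and its functions are
hypothetical, not part of the patch):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int key;
		struct rcu_head rcu;
	};

	static void foo_reclaim(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct foo, rcu));
	}

	/* Free fp once all pre-existing RCU-sched readers are done. */
	static void foo_defer_free(struct foo *fp)
	{
		call_rcu_sched(&fp->rcu, foo_reclaim);
	}
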
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -238,8 +238,8 @@ struct rcu_state {
 /*
  * RCU implementation internal declarations:
  */
-extern struct rcu_state rcu_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
+extern struct rcu_state rcu_sched_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 extern struct rcu_state rcu_bh_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -77,8 +77,8 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "rcu:\n");
-	PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+	seq_puts(m, "rcu_sched:\n");
+	PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
 	seq_puts(m, "rcu_bh:\n");
 	PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
 	return 0;
@@ -125,8 +125,8 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
-	seq_puts(m, "\"rcu:\"\n");
-	PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+	seq_puts(m, "\"rcu_sched:\"\n");
+	PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
 	seq_puts(m, "\"rcu_bh:\"\n");
 	PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
 	return 0;
@@ -172,8 +172,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcuhier(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "rcu:\n");
-	print_one_rcu_state(m, &rcu_state);
+	seq_puts(m, "rcu_sched:\n");
+	print_one_rcu_state(m, &rcu_sched_state);
 	seq_puts(m, "rcu_bh:\n");
 	print_one_rcu_state(m, &rcu_bh_state);
 	return 0;
@@ -194,8 +194,8 @@ static struct file_operations rcuhier_fops = {
 
 static int show_rcugp(struct seq_file *m, void *unused)
 {
-	seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
-		   rcu_state.completed, rcu_state.gpnum);
+	seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n",
+		   rcu_sched_state.completed, rcu_sched_state.gpnum);
 	seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
 		   rcu_bh_state.completed, rcu_bh_state.gpnum);
 	return 0;
@@ -244,8 +244,8 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcu_pending(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "rcu:\n");
-	print_rcu_pendings(m, &rcu_state);
+	seq_puts(m, "rcu_sched:\n");
+	print_rcu_pendings(m, &rcu_sched_state);
 	seq_puts(m, "rcu_bh:\n");
 	print_rcu_pendings(m, &rcu_bh_state);
 	return 0;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5325,7 +5325,7 @@ asmlinkage void __sched schedule(void)
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_qsctr_inc(cpu);
+	rcu_sched_qs(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -227,7 +227,7 @@ asmlinkage void __do_softirq(void)
 				preempt_count() = prev_count;
 			}
 
-			rcu_bh_qsctr_inc(cpu);
+			rcu_bh_qs(cpu);
 		}
 		h++;
 		pending >>= 1;
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu)
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
-			rcu_qsctr_inc((long)__bind_cpu);
+			rcu_sched_qs((long)__bind_cpu);
 		}
 		preempt_enable();
 		set_current_state(TASK_INTERRUPTIBLE);