Commit 60778176 authored by Linus Torvalds


Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
  rcu: Make RCU's CPU-stall detector be default
  rcu: Add expedited grace-period support for preemptible RCU
  rcu: Enable fourth level of TREE_RCU hierarchy
  rcu: Rename "quiet" functions
  rcu: Re-arrange code to reduce #ifdef pain
  rcu: Eliminate unneeded function wrapping
  rcu: Fix grace-period-stall bug on large systems with CPU hotplug
  rcu: Eliminate __rcu_pending() false positives
  rcu: Further cleanups of use of lastcomp
  rcu: Simplify association of forced quiescent states with grace periods
  rcu: Accelerate callback processing on CPUs not detecting GP end
  rcu: Mark init-time-only rcu_bootup_announce() as __init
  rcu: Simplify association of quiescent states with grace periods
  rcu: Rename dynticks_completed to completed_fqs
  rcu: Enable synchronize_sched_expedited() fastpath
  rcu: Remove inline from forward-referenced functions
  rcu: Fix note_new_gpnum() uses of ->gpnum
  rcu: Fix synchronization for rcu_process_gp_end() uses of ->completed counter
  rcu: Prepare for synchronization fixes: clean up for non-NO_HZ handling of ->completed counter
  rcu: Cleanup: balance rcu_irq_enter()/rcu_irq_exit() calls
  ...
parents d0b093a8 8bfb2f8e
@@ -830,7 +830,7 @@ sched: Critical sections Grace period Barrier
SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu N/A
srcu_read_unlock synchronize_srcu_expedited
SRCU: Initialization/cleanup
init_srcu_struct
...
@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk)
#endif
#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
static inline void rcu_irq_enter(void)
{
rcu_exit_nohz();
}
static inline void rcu_irq_exit(void)
{
rcu_enter_nohz();
}
static inline void rcu_nmi_enter(void)
{
}
static inline void rcu_nmi_exit(void)
{
}
#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
...
@@ -52,11 +52,6 @@ struct rcu_head {
};
/* Exported common interfaces */
#ifdef CONFIG_TREE_PREEMPT_RCU
extern void synchronize_rcu(void);
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#define synchronize_rcu synchronize_sched
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
@@ -67,12 +62,11 @@ extern int sched_expedited_torture_stats(char *page);
/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);
extern int rcu_scheduler_active;
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
...
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2008
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H
#include <linux/cache.h>
void rcu_sched_qs(int cpu);
void rcu_bh_qs(int cpu);
#define __rcu_read_lock() preempt_disable()
#define __rcu_read_unlock() preempt_enable()
#define __rcu_read_lock_bh() local_bh_disable()
#define __rcu_read_unlock_bh() local_bh_enable()
#define call_rcu_sched call_rcu
#define rcu_init_sched() do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
static inline int rcu_needs_cpu(int cpu)
{
return 0;
}
/*
* Return the number of grace periods.
*/
static inline long rcu_batches_completed(void)
{
return 0;
}
/*
* Return the number of bottom-half grace periods.
*/
static inline long rcu_batches_completed_bh(void)
{
return 0;
}
extern int rcu_expedited_torture_stats(char *page);
#define synchronize_rcu synchronize_sched
static inline void synchronize_rcu_expedited(void)
{
synchronize_sched();
}
static inline void synchronize_rcu_bh_expedited(void)
{
synchronize_sched();
}
struct notifier_block;
#ifdef CONFIG_NO_HZ
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
#else /* #ifdef CONFIG_NO_HZ */
static inline void rcu_enter_nohz(void)
{
}
static inline void rcu_exit_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_NO_HZ */
static inline void rcu_scheduler_starting(void)
{
}
static inline void exit_rcu(void)
{
}
#endif /* __LINUX_RCUTINY_H */
@@ -34,15 +34,15 @@ struct notifier_block;
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_scheduler_starting(void);
extern int rcu_expedited_torture_stats(char *page);
#ifdef CONFIG_TREE_PREEMPT_RCU
extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern void synchronize_rcu(void);
extern void exit_rcu(void);
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -57,7 +57,7 @@ static inline void __rcu_read_unlock(void)
preempt_enable();
}
#define synchronize_rcu synchronize_sched
static inline void exit_rcu(void)
{
@@ -83,7 +83,6 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched_expedited();
}
extern void __rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern long rcu_batches_completed(void);
...
@@ -48,6 +48,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
long srcu_batches_completed(struct srcu_struct *sp);
#endif
@@ -334,6 +334,15 @@ config TREE_PREEMPT_RCU
is also required. It also scales down nicely to
smaller systems.
config TINY_RCU
bool "UP-only small-memory-footprint RCU"
depends on !SMP
help
This option selects the RCU implementation that is
designed for UP systems from which real-time response
is not required. This option greatly reduces the
memory footprint of RCU.
endchoice
config RCU_TRACE
...
@@ -82,6 +82,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
obj-$(CONFIG_TINY_RCU) += rcutiny.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
...
@@ -44,7 +44,6 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
int rcu_scheduler_active __read_mostly;
/*
* Awaken the corresponding synchronize_rcu() instance now that a
* grace period has elapsed.
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
rcu = container_of(head, struct rcu_synchronize, head);
complete(&rcu->completion);
}
#ifdef CONFIG_TREE_PREEMPT_RCU
/**
* synchronize_rcu - wait until a grace period has elapsed.
*
* Control will return to the caller some time after a full grace
* period has elapsed, in other words after all currently executing RCU
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
if (!rcu_scheduler_active)
return;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
/**
* synchronize_sched - wait until an rcu-sched grace period has elapsed.
*
* Control will return to the caller some time after a full rcu-sched
* grace period has elapsed, in other words after all currently executing
* rcu-sched read-side critical sections have completed. These read-side
* critical sections are delimited by rcu_read_lock_sched() and
* rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
* local_irq_disable(), and so on may be used in place of
* rcu_read_lock_sched().
*
* This means that all preempt_disable code sequences, including NMI and
* hardware-interrupt handlers, in progress on entry will have completed
* before this primitive returns. However, this does not guarantee that
* softirq handlers will have completed, since in some kernels, these
* handlers can run in process context, and can block.
*
* This primitive provides the guarantees made by the (now removed)
* synchronize_kernel() API. In contrast, synchronize_rcu() only
* guarantees that rcu_read_lock() sections will have completed.
* In "classic RCU", these two guarantees happen to be one and
* the same, but can differ in realtime RCU implementations.
*/
void synchronize_sched(void)
{
struct rcu_synchronize rcu;
if (rcu_blocking_is_gp())
return;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_sched(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
* Control will return to the caller some time after a full rcu_bh grace
* period has elapsed, in other words after all currently executing rcu_bh
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
* and may be nested.
*/
void synchronize_rcu_bh(void)
{
struct rcu_synchronize rcu;
if (rcu_blocking_is_gp())
return;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_bh(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
unsigned long action, void *hcpu)
{
return rcu_cpu_notify(self, action, hcpu);
}
void __init rcu_init(void)
{
int i;
__rcu_init();
cpu_notifier(rcu_barrier_cpu_hotplug, 0);
/*
* We don't need protection against CPU-hotplug here because
* this is called early in boot, before either interrupts
* or the scheduler are operational.
*/
for_each_online_cpu(i)
rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
}
void rcu_scheduler_starting(void)
{
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
rcu_scheduler_active = 1;
}
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2008
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */
};
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
.donetail = &rcu_ctrlblk.rcucblist,
.curtail = &rcu_ctrlblk.rcucblist,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.donetail = &rcu_bh_ctrlblk.rcucblist,
.curtail = &rcu_bh_ctrlblk.rcucblist,
};
#ifdef CONFIG_NO_HZ
static long rcu_dynticks_nesting = 1;
/*
* Enter dynticks-idle mode, which is an extended quiescent state
* if we have fully entered that mode (i.e., if the new value of
* dynticks_nesting is zero).
*/
void rcu_enter_nohz(void)
{
if (--rcu_dynticks_nesting == 0)
rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}
/*
* Exit dynticks-idle mode, so that we are no longer in an extended
* quiescent state.
*/
void rcu_exit_nohz(void)
{
rcu_dynticks_nesting++;
}
#endif /* #ifdef CONFIG_NO_HZ */
/*
* Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
* Also disable irqs to avoid confusion due to interrupt handlers
* invoking call_rcu().
*/
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
unsigned long flags;
local_irq_save(flags);
if (rcp->rcucblist != NULL &&
rcp->donetail != rcp->curtail) {
rcp->donetail = rcp->curtail;
local_irq_restore(flags);
return 1;
}
local_irq_restore(flags);
return 0;
}
/*
* Record an rcu quiescent state. And an rcu_bh quiescent state while we
* are at it, given that any rcu quiescent state is also an rcu_bh
* quiescent state. Use "+" instead of "||" to defeat short circuiting.
*/
void rcu_sched_qs(int cpu)
{
if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
raise_softirq(RCU_SOFTIRQ);
}
/*
* Record an rcu_bh quiescent state.
*/
void rcu_bh_qs(int cpu)
{
if (rcu_qsctr_help(&rcu_bh_ctrlblk))
raise_softirq(RCU_SOFTIRQ);
}
/*
* Check to see if the scheduling-clock interrupt came from an extended
* quiescent state, and, if so, tell RCU about it.
*/
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
(idle_cpu(cpu) &&
!in_softirq() &&
hardirq_count() <= (1 << HARDIRQ_SHIFT)))
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
}
/*
* Helper function for rcu_process_callbacks() that operates on the
* specified rcu_ctrlkblk structure.
*/
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
struct rcu_head *next, *list;
unsigned long flags;
/* If no RCU callbacks ready to invoke, just return. */
if (&rcp->rcucblist == rcp->donetail)
return;
/* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags);
list = rcp->rcucblist;
rcp->rcucblist = *rcp->donetail;
*rcp->donetail = NULL;
if (rcp->curtail == rcp->donetail)
rcp->curtail = &rcp->rcucblist;
rcp->donetail = &rcp->rcucblist;
local_irq_restore(flags);
/* Invoke the callbacks on the local list. */
while (list) {
next = list->next;
prefetch(next);
list->func(list);
list = next;
}
}
/*
* Invoke any callbacks whose grace period has completed.
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
}
/*
* Wait for a grace period to elapse. But it is illegal to invoke
* synchronize_sched() from within an RCU read-side critical section.
* Therefore, any legal call to synchronize_sched() is a quiescent
* state, and so on a UP system, synchronize_sched() need do nothing.
* Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
* benefits of doing might_sleep() to reduce latency.)
*
* Cool, huh? (Due to Josh Triplett.)
*
* But we want to make this a static inline later.
*/
void synchronize_sched(void)
{
cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
void synchronize_rcu_bh(void)
{
synchronize_sched();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
/*
* Helper function for call_rcu() and call_rcu_bh().
*/
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp)
{
unsigned long flags;
head->func = func;
head->next = NULL;
local_irq_save(flags);
*rcp->curtail = head;
rcp->curtail = &head->next;
local_irq_restore(flags);
}
/*
* Post an RCU callback to be invoked after the end of an RCU grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
* Post an RCU bottom-half callback to be invoked after any subsequent
* quiescent state.
*/
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
void rcu_barrier(void)
{
struct rcu_synchronize rcu;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
void rcu_barrier_bh(void)
{
struct rcu_synchronize rcu;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_bh(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
void rcu_barrier_sched(void)
{
struct rcu_synchronize rcu;
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_sched(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
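The callback machinery above keeps one singly linked list per flavor with two tail pointers: ->curtail always addresses the ->next field of the last queued callback, while ->donetail marks the end of the segment whose grace period has already elapsed. rcu_qsctr_help() advances donetail to curtail at each quiescent state, and __rcu_process_callbacks() splices off and invokes everything up to donetail. A minimal user-space sketch of that two-tail-pointer technique (illustrative only, not kernel code):

#include <stdio.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

static struct cb *head;			/* mirrors rcp->rcucblist */
static struct cb **donetail = &head;	/* end of the "done" segment */
static struct cb **curtail = &head;	/* end of the whole list */

static void enqueue(struct cb *c)	/* mirrors __call_rcu() */
{
	c->next = NULL;
	*curtail = c;
	curtail = &c->next;
}

static void quiescent_state(void)	/* mirrors rcu_qsctr_help() */
{
	if (head && donetail != curtail)
		donetail = curtail;
}

static void process(void)		/* mirrors __rcu_process_callbacks() */
{
	struct cb *next, *ready;

	if (&head == donetail)
		return;			/* nothing ready to invoke */
	ready = head;
	head = *donetail;		/* detach the "done" segment */
	*donetail = NULL;
	if (curtail == donetail)
		curtail = &head;
	donetail = &head;
	while (ready) {			/* invoke the detached callbacks */
		next = ready->next;
		ready->func(ready);
		ready = next;
	}
}

static void show(struct cb *c)
{
	printf("invoked %p\n", (void *)c);
}

int main(void)
{
	struct cb a = { .func = show }, b = { .func = show };

	enqueue(&a);
	quiescent_state();	/* a's grace period ends */
	enqueue(&b);		/* b still waits for the next quiescent state */
	process();		/* invokes a only */
	quiescent_state();
	process();		/* invokes b */
	return 0;
}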
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p)
cur_ops->deferred_free(rp);
}
static int rcu_no_completed(void)
{
return 0;
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = {
.name = "rcu_sync"
};
static struct rcu_torture_ops rcu_expedited_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_rcu_expedited,
.cb_barrier = NULL,
.stats = NULL,
.irq_capable = 1,
.name = "rcu_expedited"
};
/*
* Definitions for rcu_bh torture testing.
*/
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = {
.name = "srcu"
};
static void srcu_torture_synchronize_expedited(void)
{
synchronize_srcu_expedited(&srcu_ctl);
}
static struct rcu_torture_ops srcu_expedited_ops = {
.init = srcu_torture_init,
.cleanup = srcu_torture_cleanup,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
.completed = srcu_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = srcu_torture_synchronize_expedited,
.cb_barrier = NULL,
.stats = srcu_torture_stats,
.name = "srcu_expedited"
};
/*
* Definitions for sched torture testing.
*/
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx)
preempt_enable();
}
static int sched_torture_completed(void)
{
return 0;
}
static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = sched_torture_synchronize,
.cb_barrier = rcu_barrier_sched,
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = {
.name = "sched"
};
static struct rcu_torture_ops sched_sync_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = sched_torture_synchronize,
.cb_barrier = NULL,
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_sched_expedited,
.cb_barrier = NULL,
@@ -1097,9 +1131,10 @@ rcu_torture_init(void)
int cpu;
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops,
&srcu_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
@@ -1110,8 +1145,12 @@ rcu_torture_init(void)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
torture_type);
printk(KERN_ALERT "rcu-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
printk(KERN_ALERT " %s", torture_ops[i]->name);
printk(KERN_ALERT "\n");
mutex_unlock(&fullstop_mutex);
return -EINVAL;
}
...
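The two new ops are selected through rcutorture's existing torture_type module parameter (for example torture_type=rcu_expedited or torture_type=srcu_expedited, matching the .name fields above), and an unrecognized type now also prints the list of valid names instead of only reporting it as invalid.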
@@ -34,10 +34,11 @@
* In practice, this has not been tested, so there is probably some
* bug somewhere.
*/
#define MAX_RCU_LVLS 4
#define RCU_FANOUT (CONFIG_RCU_FANOUT)
#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)
#if NR_CPUS <= RCU_FANOUT
# define NUM_RCU_LVLS 1
@@ -45,23 +46,33 @@
# define NUM_RCU_LVL_1 (NR_CPUS)
# define NUM_RCU_LVL_2 0
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_SQ
# define NUM_RCU_LVLS 2
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_2 (NR_CPUS)
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_CUBE
# define NUM_RCU_LVLS 3
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_3 NR_CPUS
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_FOURTH
# define NUM_RCU_LVLS 4
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_4 NR_CPUS
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT */
#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
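For concreteness, with CONFIG_RCU_FANOUT=64 (assumed here; the usual 64-bit default) and NR_CPUS=4096, NR_CPUS <= RCU_FANOUT_SQ (64*64), so the macros above work out to:

NUM_RCU_LVLS  = 2
NUM_RCU_LVL_0 = 1                                (the root rcu_node)
NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 64) = 64      (leaf rcu_node structures)
NUM_RCU_LVL_2 = 4096                             (per-CPU rcu_data structures)
RCU_SUM       = 1 + 64 + 4096 + 0 + 0 = 4161
NUM_RCU_NODES = 4161 - 4096 = 65

The new fourth level raises the largest supported configuration at this fanout from RCU_FANOUT_CUBE = 64^3 = 262,144 CPUs to RCU_FANOUT_FOURTH = 64^4 = 16,777,216 CPUs.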
/*
@@ -84,14 +95,21 @@ struct rcu_node {
long gpnum; /* Current grace period for this node. */
/* This will either be equal to or one */
/* behind the root rcu_node's gpnum. */
long completed; /* Last grace period completed for this node. */
/* This will either be equal to or one */
/* behind the root rcu_node's gpnum. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
/* In leaf rcu_node, each bit corresponds to */
/* an rcu_data structure, otherwise, each */
/* bit corresponds to a child rcu_node */
/* structure. */
unsigned long expmask; /* Groups that have ->blocked_tasks[] */
/* elements that need to drain to allow the */
/* current expedited grace period to */
/* complete (only for TREE_PREEMPT_RCU). */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
/* Only one bit will be set in this mask. */
int grplo; /* lowest-numbered CPU or group here. */
@@ -99,7 +117,7 @@ struct rcu_node {
u8 grpnum; /* CPU/group number for next level up. */
u8 level; /* root is at level 0. */
struct rcu_node *parent;
struct list_head blocked_tasks[4];
/* Tasks blocked in RCU read-side critsect. */
/* Grace period number (->gpnum) x blocked */
/* by tasks on the (x & 0x1) element of the */
@@ -114,6 +132,21 @@ struct rcu_node {
for ((rnp) = &(rsp)->node[0]; \
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
/*
* Do a breadth-first scan of the non-leaf rcu_node structures for the
* specified rcu_state structure. Note that if there is a singleton
* rcu_node tree with but one rcu_node structure, this loop is a no-op.
*/
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
for ((rnp) = &(rsp)->node[0]; \
(rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
/*
* Scan the leaves of the rcu_node hierarchy for the specified rcu_state
* structure. Note that if there is a singleton rcu_node tree with but
* one rcu_node structure, this loop -will- visit the rcu_node structure.
* It is still a leaf node, even if it is also the root node.
*/
#define rcu_for_each_leaf_node(rsp, rnp) \
for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
@@ -204,11 +237,12 @@ struct rcu_data {
#define RCU_GP_IDLE 0 /* No grace period in progress. */
#define RCU_GP_INIT 1 /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -246,7 +280,7 @@ struct rcu_state {
long gpnum; /* Current gp number. */
long completed; /* # of last completed gp. */
/* End of fields guarded by root rcu_node's lock. */
spinlock_t onofflock; /* exclude on/offline and */
/* starting new GP. Also */
@@ -260,6 +294,8 @@ struct rcu_state {
long orphan_qlen; /* Number of orphaned cbs. */
spinlock_t fqslock; /* Only one task forcing */
/* quiescent states. */
long completed_fqs; /* Value of completed @ snap. */
/* Protected by fqslock. */
unsigned long jiffies_force_qs; /* Time at which to invoke */
/* force_quiescent_state(). */
unsigned long n_force_qs; /* Number of calls to */
@@ -274,11 +310,15 @@ struct rcu_state {
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
#ifdef CONFIG_NO_HZ
long dynticks_completed; /* Value of completed @ snap. */
#endif /* #ifdef CONFIG_NO_HZ */
};
/* Return values for rcu_preempt_offline_tasks(). */
#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
/* GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
/* GP were moved to root. */
#ifdef RCU_TREE_NONCORE
/*
@@ -298,10 +338,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#else /* #ifdef RCU_TREE_NONCORE */
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_task_stall(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -315,6 +359,9 @@ static void rcu_preempt_offline_cpu(int cpu);
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
...
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = {
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
long gpnum;
int level = 0;
int phase;
struct rcu_node *rnp;
gpnum = rsp->gpnum;
seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
"nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
rsp->completed, gpnum, rsp->signaled,
(long)(rsp->jiffies_force_qs - jiffies),
(int)(jiffies & 0xffff),
rsp->n_force_qs, rsp->n_force_qs_ngp,
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
level = rnp->level;
}
phase = gpnum & 0x1;
seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
rnp->qsmask, rnp->qsmaskinit,
"T."[list_empty(&rnp->blocked_tasks[phase])],
"E."[list_empty(&rnp->blocked_tasks[phase + 2])],
"T."[list_empty(&rnp->blocked_tasks[!phase])],
"E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
rnp->grplo, rnp->grphi, rnp->grpnum);
}
seq_puts(m, "\n");
...
@@ -10901,6 +10901,7 @@ void synchronize_sched_expedited(void)
spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
mutex_unlock(&rcu_sched_expedited_mutex);
put_online_cpus();
if (need_full_sync)
...
@@ -302,9 +302,9 @@ void irq_exit(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
rcu_irq_exit();
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
rcu_irq_exit();
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
tick_nohz_stop_sched_tick(0);
#endif
...
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
return (sp->per_cpu_ref ? 0 : -ENOMEM);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
/*
* srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
free_percpu(sp->per_cpu_ref);
sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
preempt_enable();
return idx;
}
EXPORT_SYMBOL_GPL(srcu_read_lock);
/**
* srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
preempt_enable();
}
EXPORT_SYMBOL_GPL(srcu_read_unlock);
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
* @sp: srcu_struct with which to synchronize.
*
* Flip the completed counter, and wait for the old count to drain to zero.
* As with classic RCU, the updater must use some separate means of
* synchronizing concurrent updates. Can block; must be called from
* process context.
*
* Note that it is illegal to call synchornize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock.
* However, it is perfectly legal to call synchronize_srcu() on one
* srcu_struct from some other srcu_struct's read-side critical section.
*/
void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
int idx;
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
return;
}
sync_func(); /* Force memory barrier on all CPUs. */
/*
* The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
idx = sp->completed & 0x1;
sp->completed++;
sync_func(); /* Force memory barrier on all CPUs. */
/*
* At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
while (srcu_readers_active_idx(sp, idx))
schedule_timeout_interruptible(1);
sync_func(); /* Force memory barrier on all CPUs. */
/*
* The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -236,6 +229,47 @@ void synchronize_srcu(struct srcu_struct *sp)
mutex_unlock(&sp->mutex);
}
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
* @sp: srcu_struct with which to synchronize.
*
* Flip the completed counter, and wait for the old count to drain to zero.
* As with classic RCU, the updater must use some separate means of
* synchronizing concurrent updates. Can block; must be called from
* process context.
*
* Note that it is illegal to call synchronize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock.
* However, it is perfectly legal to call synchronize_srcu() on one
* srcu_struct from some other srcu_struct's read-side critical section.
*/
void synchronize_srcu(struct srcu_struct *sp)
{
__synchronize_srcu(sp, synchronize_sched);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
/**
* synchronize_srcu_expedited - like synchronize_srcu, but less patient
* @sp: srcu_struct with which to synchronize.
*
* Flip the completed counter, and wait for the old count to drain to zero.
* As with classic RCU, the updater must use some separate means of
* synchronizing concurrent updates. Can block; must be called from
* process context.
*
* Note that it is illegal to call synchronize_srcu_expedited()
* from the corresponding SRCU read-side critical section; doing so
* will result in deadlock. However, it is perfectly legal to call
* synchronize_srcu_expedited() on one srcu_struct from some other
* srcu_struct's read-side critical section.
*/
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
__synchronize_srcu(sp, synchronize_sched_expedited);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
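A minimal updater-side sketch of the new primitive (assumed example; my_data, my_srcu, and the helper names are illustrative, not part of this patch):

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data { int val; };		/* assumed payload */
static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) during setup */
static struct my_data *my_ptr;

static int read_val(void)
{
	struct my_data *p;
	int idx, ret = -1;

	idx = srcu_read_lock(&my_srcu);	/* SRCU readers may block, unlike rcu_read_lock() */
	p = rcu_dereference(my_ptr);
	if (p)
		ret = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return ret;
}

static void replace_val(struct my_data *newp)
{
	struct my_data *oldp = my_ptr;

	rcu_assign_pointer(my_ptr, newp);
	synchronize_srcu_expedited(&my_srcu);	/* low-latency wait for pre-existing readers */
	kfree(oldp);
}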
/**
* srcu_batches_completed - return batches completed.
* @sp: srcu_struct on which to report batch completion.
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
{
return sp->completed;
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
EXPORT_SYMBOL_GPL(srcu_read_lock);
EXPORT_SYMBOL_GPL(srcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
@@ -750,7 +750,7 @@ config RCU_TORTURE_TEST_RUNNABLE
config RCU_CPU_STALL_DETECTOR
bool "Check for stalled CPUs delaying RCU grace periods"
depends on TREE_RCU || TREE_PREEMPT_RCU
default y
help
This option causes RCU to printk information on which
CPUs are delaying the current grace period, but only when
...