Commit e3aa52d6 authored by Avi Kivity

Merge commit '29ce8310' into next

* commit '29ce8310': (34 commits)
  rcu: provide rcu_virt_note_context_switch() function.
  rcu: get rid of signed overflow in check_cpu_stall()
  rcu: optimize rcutiny
  rcu: prevent call_rcu() from diving into rcu core if irqs disabled
  rcu: further lower priority in rcu_yield()
  rcu: introduce kfree_rcu()
  rcu: fix spelling
  rcu: call __rcu_read_unlock() in exit_rcu for tree RCU
  rcu: Converge TINY_RCU expedited and normal boosting
  rcu: remove useless ->boosted_this_gp field
  rcu: code cleanups in TINY_RCU priority boosting.
  rcu: Switch to this_cpu() primitives
  rcu: Use WARN_ON_ONCE for DEBUG_OBJECTS_RCU_HEAD warnings
  rcu: mark rcutorture boosting callback as being on-stack
  rcu: add DEBUG_OBJECTS_RCU_HEAD check for alignment
  rcu: Enable DEBUG_OBJECTS_RCU_HEAD from !PREEMPT
  rcu: Add forward-progress diagnostic for per-CPU kthreads
  rcu: add grace-period age and more kthread state to tracing
  rcu: fix tracing bug thinko on boost-balk attribution
  rcu: update tracing documentation for new rcutorture and rcuboost
  ...

Pulling in rcu_virt_note_context_switch().
Signed-off-by: Avi Kivity <avi@redhat.com>

parents d2f62766 29ce8310
......@@ -21,7 +21,7 @@ rcu.txt
RTFP.txt
- List of RCU papers (bibliography) going back to 1980.
stallwarn.txt
- RCU CPU stall warnings (CONFIG_RCU_CPU_STALL_DETECTOR)
- RCU CPU stall warnings (module parameter rcu_cpu_stall_suppress)
torture.txt
- RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
trace.txt
......
Using RCU's CPU Stall Detector
The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
RCU's CPU stall detector, which detects conditions that unduly delay
RCU grace periods. The stall detector's idea of what constitutes
"unduly delayed" is controlled by a set of C preprocessor macros:
The rcu_cpu_stall_suppress module parameter enables RCU's CPU stall
detector, which detects conditions that unduly delay RCU grace periods.
This module parameter enables CPU stall detection by default, but
may be overridden via boot-time parameter or at runtime via sysfs.
The stall detector's idea of what constitutes "unduly delayed" is
controlled by a set of kernel configuration variables and cpp macros:
RCU_SECONDS_TILL_STALL_CHECK
CONFIG_RCU_CPU_STALL_TIMEOUT
This macro defines the period of time that RCU will wait from
the beginning of a grace period until it issues an RCU CPU
stall warning. This time period is normally ten seconds.
This kernel configuration parameter defines the period of time
that RCU will wait from the beginning of a grace period until it
issues an RCU CPU stall warning. This time period is normally
ten seconds.
RCU_SECONDS_TILL_STALL_RECHECK
This macro defines the period of time that RCU will wait after
issuing a stall warning until it issues another stall warning
for the same stall. This time period is normally set to thirty
seconds.
for the same stall. This time period is normally set to three
times the check interval plus thirty seconds.
RCU_STALL_RAT_DELAY
......
......@@ -836,7 +836,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
TASKLET: 0 0 0 290
SCHED: 27035 26983 26971 26746
HRTIMER: 0 0 0 0
RCU: 1678 1769 2178 2250
1.3 IDE devices in /proc/ide
......
......@@ -414,7 +414,6 @@ enum
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
HRTIMER_SOFTIRQ,
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
};
......
......@@ -47,6 +47,18 @@
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#endif
#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
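These wraparound-safe comparison macros underpin the "get rid of signed overflow in check_cpu_stall()" change listed above: unsigned subtraction wraps modulo 2^N, so ordering survives counter wraparound. A minimal standalone sketch of the semantics (the macro definitions are copied from this diff; the test values are illustrative, not from the patch):

#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long before = ULONG_MAX - 10;	/* counter just before wrap */
	unsigned long after = 5;		/* same counter after wrapping */

	/* Unsigned subtraction wraps, so "after" still compares as later
	 * even though it is numerically smaller; a plain >= would say no. */
	printf("%d\n", ULONG_CMP_GE(after, before));	/* prints 1 */
	printf("%d\n", ULONG_CMP_LT(before, after));	/* prints 1 */
	return 0;
}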
......@@ -68,7 +80,6 @@ extern void call_rcu_sched(struct rcu_head *head,
extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern int sched_expedited_torture_stats(char *page);
static inline void __rcu_read_lock_bh(void)
{
......@@ -774,6 +785,7 @@ extern struct debug_obj_descr rcuhead_debug_descr;
static inline void debug_rcu_head_queue(struct rcu_head *head)
{
WARN_ON_ONCE((unsigned long)head & 0x3);
debug_object_activate(head, &rcuhead_debug_descr);
debug_object_active_state(head, &rcuhead_debug_descr,
STATE_RCU_HEAD_READY,
......@@ -797,4 +809,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
{
return offset < 4096;
}
static __always_inline
void __kfree_rcu(struct rcu_head *head, unsigned long offset)
{
typedef void (*rcu_callback)(struct rcu_head *);
BUILD_BUG_ON(!__builtin_constant_p(offset));
/* See the kfree_rcu() header comment. */
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
call_rcu(head, (rcu_callback)offset);
}
extern void kfree(const void *);
static inline void __rcu_reclaim(struct rcu_head *head)
{
unsigned long offset = (unsigned long)head->func;
if (__is_kfree_rcu_offset(offset))
kfree((void *)head - offset);
else
head->func(head);
}
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree
* @rcu_head: the name of the struct rcu_head within the type of @ptr.
*
* Many rcu callback functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
* The kfree_rcu() function handles this issue. Rather than encoding a
* function address in the embedded rcu_head structure, kfree_rcu() instead
* encodes the offset of the rcu_head structure within the base structure.
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in __kfree_rcu(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
* Note that the allowable offset might decrease in the future, for example,
* to allow something like kmem_cache_free_rcu().
*/
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
#endif /* __LINUX_RCUPDATE_H */
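To make the offset trick concrete, here is a hypothetical usage sketch (struct foo and foo_release() are illustrative names, not part of this patch; it assumes a kernel build context):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
	struct rcu_head rcu;	/* must sit within the first 4096 bytes */
};

static void foo_release(struct foo *fp)
{
	/* Free fp after a grace period. No callback function is emitted,
	 * so a module using this need not rcu_barrier() on unload for it. */
	kfree_rcu(fp, rcu);
}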
......@@ -99,6 +99,14 @@ static inline void rcu_note_context_switch(int cpu)
rcu_preempt_note_context_switch();
}
/*
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
}
/*
* Return the number of grace periods.
*/
......
......@@ -35,6 +35,16 @@ extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
* to save a few bytes.
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
rcu_note_context_switch(cpu);
}
#ifdef CONFIG_TREE_PREEMPT_RCU
extern void exit_rcu(void);
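Per the merge description ("Pulling in rcu_virt_note_context_switch()"), KVM is the intended caller of the function defined just above. A hedged sketch of what a call site might look like (guest_enter_sketch() is an illustrative name, not from this merge):

#include <linux/rcupdate.h>
#include <linux/smp.h>

/* Hypothetical caller: a hypervisor notes a switch into guest context so
 * RCU can treat it as a quiescent state; under TINY_RCU this is a no-op. */
static inline void guest_enter_sketch(void)
{
	rcu_virt_note_context_switch(smp_processor_id());
}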
......@@ -58,9 +68,12 @@ static inline void synchronize_rcu_bh_expedited(void)
extern void rcu_barrier(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
......
......@@ -20,8 +20,7 @@ struct softirq_action;
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
softirq_name(HRTIMER))
/**
* irq_handler_entry - called immediately before the irq action handler
......
......@@ -485,7 +485,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && TINY_PREEMPT_RCU
depends on RT_MUTEXES && PREEMPT_RCU
default n
help
This option boosts the priority of preempted RCU readers that
......
......@@ -142,10 +142,17 @@ static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
* Ensure that queued callbacks are all executed.
* If we detect that we are nested in a RCU read-side critical
* section, we should simply fail, otherwise we would deadlock.
* In !PREEMPT configurations, there is no way to tell if we are
* in a RCU read-side critical section or not, so we never
* attempt any fixup and just print a warning.
*/
#ifndef CONFIG_PREEMPT
WARN_ON_ONCE(1);
return 0;
#endif
if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
irqs_disabled()) {
WARN_ON(1);
WARN_ON_ONCE(1);
return 0;
}
rcu_barrier();
......@@ -184,10 +191,17 @@ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
* Ensure that queued callbacks are all executed.
* If we detect that we are nested in a RCU read-side critical
* section, we should simply fail, otherwise we would deadlock.
* In !PREEMPT configurations, there is no way to tell if we are
* in a RCU read-side critical section or not, so we never
* attempt any fixup and just print a warning.
*/
#ifndef CONFIG_PREEMPT
WARN_ON_ONCE(1);
return 0;
#endif
if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
irqs_disabled()) {
WARN_ON(1);
WARN_ON_ONCE(1);
return 0;
}
rcu_barrier();
......@@ -214,15 +228,17 @@ static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
* Ensure that queued callbacks are all executed.
* If we detect that we are nested in a RCU read-side critical
* section, we should simply fail, otherwise we would deadlock.
* Note that the machinery to reliably determine whether
* or not we are in an RCU read-side critical section
* exists only in the preemptible RCU implementations
* (TINY_PREEMPT_RCU and TREE_PREEMPT_RCU), which is why
* DEBUG_OBJECTS_RCU_HEAD is disallowed if !PREEMPT.
* In !PREEMPT configurations, there is no way to tell if we are
* in a RCU read-side critical section or not, so we never
* attempt any fixup and just print a warning.
*/
#ifndef CONFIG_PREEMPT
WARN_ON_ONCE(1);
return 0;
#endif
if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
irqs_disabled()) {
WARN_ON(1);
WARN_ON_ONCE(1);
return 0;
}
rcu_barrier();
......
......@@ -40,10 +40,10 @@
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;
static void invoke_rcu_kthread(void);
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_kthread(void);
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
......@@ -79,26 +79,31 @@ void rcu_exit_nohz(void)
#endif /* #ifdef CONFIG_NO_HZ */
/*
* Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
* Also disable irqs to avoid confusion due to interrupt handlers
* Helper function for rcu_sched_qs() and rcu_bh_qs().
* Also irqs are disabled to avoid confusion due to interrupt handlers
* invoking call_rcu().
*/
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
unsigned long flags;
local_irq_save(flags);
if (rcp->rcucblist != NULL &&
rcp->donetail != rcp->curtail) {
rcp->donetail = rcp->curtail;
local_irq_restore(flags);
return 1;
}
local_irq_restore(flags);
return 0;
}
/*
* Wake up rcu_kthread() to process callbacks now eligible for invocation
* or to boost readers.
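* Note: in this version interrupt disabling has moved out of this helper
* and into the callers (rcu_sched_qs() and rcu_bh_qs() below).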
*/
static void invoke_rcu_kthread(void)
{
have_rcu_kthread_work = 1;
wake_up(&rcu_kthread_wq);
}
/*
* Record an rcu quiescent state. And an rcu_bh quiescent state while we
* are at it, given that any rcu quiescent state is also an rcu_bh
......@@ -106,9 +111,13 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
*/
void rcu_sched_qs(int cpu)
{
unsigned long flags;
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
rcu_qsctr_help(&rcu_bh_ctrlblk))
invoke_rcu_kthread();
local_irq_restore(flags);
}
/*
......@@ -116,8 +125,12 @@ void rcu_sched_qs(int cpu)
*/
void rcu_bh_qs(int cpu)
{
unsigned long flags;
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_bh_ctrlblk))
invoke_rcu_kthread();
local_irq_restore(flags);
}
/*
......@@ -167,7 +180,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
prefetch(next);
debug_rcu_head_unqueue(list);
local_bh_disable();
list->func(list);
__rcu_reclaim(list);
local_bh_enable();
list = next;
RCU_TRACE(cb_count++);
......@@ -207,20 +220,6 @@ static int rcu_kthread(void *arg)
return 0; /* Not reached, but needed to shut gcc up. */
}
/*
* Wake up rcu_kthread() to process callbacks now eligible for invocation
* or to boost readers.
*/
static void invoke_rcu_kthread(void)
{
unsigned long flags;
local_irq_save(flags);
have_rcu_kthread_work = 1;
wake_up(&rcu_kthread_wq);
local_irq_restore(flags);
}
/*
* Wait for a grace period to elapse. But it is illegal to invoke
* synchronize_sched() from within an RCU read-side critical section.
......
......@@ -131,7 +131,7 @@ struct rcu_torture {
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static long rcu_torture_current_version;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
......@@ -146,8 +146,6 @@ static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_allocerror;
static long n_rcu_torture_boost_afferror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
......@@ -163,11 +161,11 @@ static int stutter_pause_test;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#ifdef CONFIG_RCU_BOOST
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #ifdef CONFIG_RCU_BOOST */
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #ifdef CONFIG_RCU_BOOST */
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
......@@ -751,6 +749,7 @@ static int rcu_torture_boost(void *arg)
n_rcu_torture_boost_rterror++;
}
init_rcu_head_on_stack(&rbi.rcu);
/* Each pass through the following loop does one boost-test cycle. */
do {
/* Wait for the next test interval. */
......@@ -810,6 +809,7 @@ checkwait: rcu_stutter_wait("rcu_torture_boost");
/* Clean up and exit. */
VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
destroy_rcu_head_on_stack(&rbi.rcu);
rcutorture_shutdown_absorb("rcu_torture_boost");
while (!kthread_should_stop() || rbi.inflight)
schedule_timeout_uninterruptible(1);
......@@ -886,7 +886,7 @@ rcu_torture_writer(void *arg)
old_rp->rtort_pipe_count++;
cur_ops->deferred_free(old_rp);
}
rcu_torture_current_version++;
rcutorture_record_progress(++rcu_torture_current_version);
oldbatch = cur_ops->completed();
rcu_stutter_wait("rcu_torture_writer");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
......@@ -1066,8 +1066,8 @@ rcu_torture_printk(char *page)
}
cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld rtbae: %ld rtbafe: %ld "
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld "
"rtbf: %ld rtb: %ld nt: %ld",
rcu_torture_current,
rcu_torture_current_version,
......@@ -1078,16 +1078,12 @@ rcu_torture_printk(char *page)
atomic_read(&n_rcu_torture_mberror),
n_rcu_torture_boost_ktrerror,
n_rcu_torture_boost_rterror,
n_rcu_torture_boost_allocerror,
n_rcu_torture_boost_afferror,
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
n_rcu_torture_timers);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
n_rcu_torture_boost_allocerror != 0 ||
n_rcu_torture_boost_afferror != 0 ||
n_rcu_torture_boost_failure != 0)
cnt += sprintf(&page[cnt], " !!!");
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
......@@ -1331,6 +1327,7 @@ rcu_torture_cleanup(void)
int i;
mutex_lock(&fullstop_mutex);
rcutorture_record_test_transition();
if (fullstop == FULLSTOP_SHUTDOWN) {
printk(KERN_WARNING /* but going down anyway, so... */
"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
......@@ -1486,8 +1483,6 @@ rcu_torture_init(void)
atomic_set(&n_rcu_torture_error, 0);
n_rcu_torture_boost_ktrerror = 0;
n_rcu_torture_boost_rterror = 0;
n_rcu_torture_boost_allocerror = 0;
n_rcu_torture_boost_afferror = 0;
n_rcu_torture_boost_failure = 0;
n_rcu_torture_boosts = 0;
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
......@@ -1624,6 +1619,7 @@ rcu_torture_init(void)
}
}
register_reboot_notifier(&rcutorture_shutdown_nb);
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
return 0;
......
......@@ -84,13 +84,19 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
int dynticks_nesting; /* Track nesting level, sort of. */
int dynticks; /* Even value for dynticks-idle, else odd. */
int dynticks_nmi; /* Even value for either dynticks-idle or */
/* not in nmi handler, else odd. So this */
/* remains even for nmi from irq handler. */
int dynticks_nesting; /* Track irq/process nesting level. */
int dynticks_nmi_nesting; /* Track NMI nesting level. */
atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
};
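The switch of ->dynticks to atomic_t lets other CPUs safely sample a remote CPU's dynticks state; per the comment above, an even value means dynticks-idle. A hedged sketch of such a check, assuming the surrounding rcutree.h context (the helper name is illustrative, not from this patch):

/* Illustrative only: true when the CPU's ->dynticks value is even,
 * i.e. the CPU is dynticks-idle and in an extended quiescent state. */
static inline int rcu_dynticks_idle_sketch(struct rcu_dynticks *rdtp)
{
	return (atomic_read(&rdtp->dynticks) & 0x1) == 0;
}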
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED 0
#define RCU_KTHREAD_RUNNING 1
#define RCU_KTHREAD_WAITING 2
#define RCU_KTHREAD_OFFCPU 3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX 4
/*
* Definition for node within the RCU grace-period-detection hierarchy.
*/
......@@ -109,10 +115,11 @@ struct rcu_node {
/* an rcu_data structure, otherwise, each */
/* bit corresponds to a child rcu_node */
/* structure. */
unsigned long expmask; /* Groups that have ->blocked_tasks[] */
unsigned long expmask; /* Groups that have ->blkd_tasks */
/* elements that need to drain to allow the */
/* current expedited grace period to */
/* complete (only for TREE_PREEMPT_RCU). */
unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
......@@ -122,11 +129,68 @@ struct rcu_node {
u8 grpnum; /* CPU/group number for next level up. */
u8 level; /* root is at level 0. */
struct rcu_node *parent;
struct list_head blocked_tasks[4];
/* Tasks blocked in RCU read-side critsect. */
/* Grace period number (->gpnum) x blocked */
/* by tasks on the (x & 0x1) element of the */
/* blocked_tasks[] array. */
struct list_head blkd_tasks;
/* Tasks blocked in RCU read-side critical */
/* section. Tasks are placed at the head */
/* of this list and age towards the tail. */
struct list_head *gp_tasks;
/* Pointer to the first task blocking the */
/* current grace period, or NULL if there */
/* is no such task. */
struct list_head *exp_tasks;
/* Pointer to the first task blocking the */
/* current expedited grace period, or NULL */
/* if there is no such task. If there */
/* is no current expedited grace period, */
/* then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
struct list_head *boost_tasks;
/* Pointer to first task that needs to be */
/* priority boosted, or NULL if no priority */
/* boosting is needed for this rcu_node */
/* structure. If there are no tasks */
/* queued on this rcu_node structure that */
/* are blocking the current grace period, */
/* there can be no such task. */
unsigned long boost_time;
/* When to start boosting (jiffies). */
struct task_struct *boost_kthread_task;
/* kthread that takes care of priority */
/* boosting for this rcu_node structure. */
wait_queue_head_t boost_wq;
/* Wait queue on which to park the boost */
/* kthread. */
unsigned int boost_kthread_status;
/* State of boost_kthread_task for tracing. */
unsigned long n_tasks_boosted;
/* Total number of tasks boosted. */
unsigned long n_exp_boosts;
/* Number of tasks boosted for expedited GP. */
unsigned long n_normal_boosts;
/* Number of tasks boosted for normal GP. */
unsigned long n_balk_blkd_tasks;
/* Refused to boost: no blocked tasks. */
unsigned long n_balk_exp_gp_tasks;
/* Refused to boost: nothing blocking GP. */
unsigned long n_balk_boost_tasks;
/* Refused to boost: already boosting. */
unsigned long n_balk_notblocked;
/* Refused to boost: RCU RS CS still running. */
unsigned long n_balk_notyet;
/* Refused to boost: not yet time. */
unsigned long n_balk_nos;
/* Refused to boost: not sure why, though. */
/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
struct task_struct *node_kthread_task;
/* kthread that takes care of this rcu_node */
/* structure, for example, awakening the */
/* per-CPU kthreads as needed. */
wait_queue_head_t node_wq;
/* Wait queue on which to park the per-node */
/* kthread. */
unsigned int node_kthread_status;
/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;
/*
......@@ -175,7 +239,7 @@ struct rcu_data {
bool passed_quiesc; /* User-mode/idle loop etc. */
bool qs_pending; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
bool preemptable; /* Preemptable RCU? */
bool preemptible; /* Preemptible RCU? */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
......@@ -218,7 +282,6 @@ struct rcu_data {
/* 3) dynticks interface. */
struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
......@@ -254,7 +317,6 @@ struct rcu_data {
#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
......@@ -272,13 +334,6 @@ struct rcu_data {
/* scheduling clock irq */
/* before ratting on them. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE
#define RCU_CPU_STALL_SUPPRESS_INIT 0
#else
#define RCU_CPU_STALL_SUPPRESS_INIT 1
#endif
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
* RCU global state, including node hierarchy. This hierarchy is
......@@ -325,12 +380,12 @@ struct rcu_state {
/* due to lock unavailable. */
unsigned long n_force_qs_ngp; /* Number of calls leaving */
/* due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
unsigned long gp_start; /* Time at which GP started, */
/* but in jiffies. */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
char *name; /* Name of structure. */
};
......@@ -361,16 +416,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
......@@ -390,5 +443,16 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_needs_cpu_flush(void);
static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
static void rcu_initiate_boost(struct rcu_node *rnp);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
cpumask_var_t cm);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp,
int rnp_index);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_stop_boost_kthread(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* #ifndef RCU_TREE_NONCORE */
......@@ -46,6 +46,18 @@
#define RCU_TREE_NONCORE
#include "rcutree.h"
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
static char convert_kthread_status(unsigned int kthread_status)
{
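/*
* "SRWOY" is indexed by the RCU_KTHREAD_* values defined in rcutree.h:
* Stopped, Running, Waiting, Offcpu, Yielding.
*/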
if (kthread_status > RCU_KTHREAD_MAX)
return '?';
return "SRWOY"[kthread_status];
}
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
......@@ -57,14 +69,28 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->passed_quiesc, rdp->passed_quiesc_completed,
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, " dt=%d/%d dn=%d df=%lu",
rdp->dynticks->dynticks,
seq_printf(m, " dt=%d/%d/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit);
seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
rdp->cpu)),
per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
rdp->blimit);
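/*
* In the qs= field above, each position prints '.' when the corresponding
* callback-list segment is empty, or N/R/W/D when callbacks are queued in
* the Next, Ready, Wait and Done segments respectively.
*/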
seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
......@@ -115,13 +141,24 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, ",%d,%d,%d,%lu",
rdp->dynticks->dynticks,
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit);
seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
rdp->nxttail[RCU_NEXT_READY_TAIL]],
".W"[rdp->nxttail[RCU_DONE_TAIL] !=
rdp->nxttail[RCU_WAIT_TAIL]],
".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
per_cpu(rcu_cpu_has_work, rdp->cpu),
convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
rdp->cpu)),
rdp->blimit);
seq_printf(m, ",%lu,%lu,%lu\n",
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
......@@ -130,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
{
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
......@@ -157,11 +194,76 @@ static const struct file_operations rcudata_csv_fops = {
.release = single_release,
};
#ifdef CONFIG_RCU_BOOST
static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
{
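/*
* tasks= flags: 'T' means ->blkd_tasks is non-empty; 'N', 'E' and 'B'
* mean ->gp_tasks, ->exp_tasks and ->boost_tasks are non-NULL.
*/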
seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu "
"j=%04x bt=%04x\n",
rnp->grplo, rnp->grphi,
"T."[list_empty(&rnp->blkd_tasks)],
"N."[!rnp->gp_tasks],
"E."[!rnp->exp_tasks],
"B."[!rnp->boost_tasks],
convert_kthread_status(rnp->boost_kthread_status),
rnp->n_tasks_boosted, rnp->n_exp_boosts,
rnp->n_normal_boosts,
(int)(jiffies & 0xffff),
(int)(rnp->boost_time & 0xffff));
seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
" balk",
rnp->n_balk_blkd_tasks,
rnp->n_balk_exp_gp_tasks,
rnp->n_balk_boost_tasks,
rnp->n_balk_notblocked,
rnp->n_balk_notyet,
rnp->n_balk_nos);
}
static int show_rcu_node_boost(struct seq_file *m, void *unused)
{
struct rcu_node *rnp;
rcu_for_each_leaf_node(&rcu_preempt_state, rnp)
print_one_rcu_node_boost(m, rnp);
return 0;
}
static int rcu_node_boost_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcu_node_boost, NULL);
}
static const struct file_operations rcu_node_boost_fops = {
.owner = THIS_MODULE,
.open = rcu_node_boost_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* Create the rcuboost debugfs entry. Standard error return.
*/
static int rcu_boost_trace_create_file(struct dentry *rcudir)
{
return !debugfs_create_file("rcuboost", 0444, rcudir, NULL,
&rcu_node_boost_fops);
}
#else /* #ifdef CONFIG_RCU_BOOST */
static int rcu_boost_trace_create_file(struct dentry *rcudir)
{
return 0; /* There cannot be an error if we didn't create it! */
}
#endif /* #else #ifdef CONFIG_RCU_BOOST */
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
unsigned long gpnum;
int level = 0;
int phase;
struct rcu_node *rnp;
gpnum = rsp->gpnum;
......@@ -178,13 +280,11 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
level = rnp->level;
}
phase = gpnum & 0x1;
seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ",
rnp->qsmask, rnp->qsmaskinit,
"T."[list_empty(&rnp->blocked_tasks[phase])],
"E."[list_empty(&rnp->blocked_tasks[phase + 2])],
"T."[list_empty(&rnp->blocked_tasks[!phase])],
"E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
".G"[rnp->gp_tasks != NULL],
".E"[rnp->exp_tasks != NULL],
".T"[!list_empty(&rnp->blkd_tasks)],
rnp->grplo, rnp->grphi, rnp->grpnum);
}
seq_puts(m, "\n");
......@@ -216,16 +316,35 @@ static const struct file_operations rcuhier_fops = {
.release = single_release,
};
static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
{
unsigned long flags;
unsigned long completed;
unsigned long gpnum;
unsigned long gpage;
unsigned long gpmax;
struct rcu_node *rnp = &rsp->node[0];
raw_spin_lock_irqsave(&rnp->lock, flags);
completed = rsp->completed;
gpnum = rsp->gpnum;
if (rsp->completed == rsp->gpnum)
gpage = 0;
else
gpage = jiffies - rsp->gp_start;
gpmax = rsp->gp_max;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
seq_printf(m, "%s: completed=%ld gpnum=%lu age=%ld max=%ld\n",
rsp->name, completed, gpnum, gpage, gpmax);
}
static int show_rcugp(struct seq_file *m, void *unused)
{
#ifdef CONFIG_TREE_PREEMPT_RCU
seq_printf(m, "rcu_preempt: completed=%ld gpnum=%lu\n",
rcu_preempt_state.completed, rcu_preempt_state.gpnum);
show_one_rcugp(m, &rcu_preempt_state);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
seq_printf(m, "rcu_sched: completed=%ld gpnum=%lu\n",
rcu_sched_state.completed, rcu_sched_state.gpnum);
seq_printf(m, "rcu_bh: completed=%ld gpnum=%lu\n",
rcu_bh_state.completed, rcu_bh_state.gpnum);
show_one_rcugp(m, &rcu_sched_state);
show_one_rcugp(m, &rcu_bh_state);
return 0;
}
......@@ -298,6 +417,29 @@ static const struct file_operations rcu_pending_fops = {
.release = single_release,
};
static int show_rcutorture(struct seq_file *m, void *unused)
{
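/*
* rcutorture_testseq is bumped by rcutorture_record_test_transition() at
* both test start and test stop (see the rcutorture diff above), so an
* odd value means a test is currently in progress.
*/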
seq_printf(m, "rcutorture test sequence: %lu %s\n",
rcutorture_testseq >> 1,
(rcutorture_testseq & 0x1) ? "(test in progress)" : "");
seq_printf(m, "rcutorture update version number: %lu\n",
rcutorture_vernum);
return 0;
}
static int rcutorture_open(struct inode *inode, struct file *file)
{
return single_open(file, show_rcutorture, NULL);
}
static const struct file_operations rcutorture_fops = {
.owner = THIS_MODULE,
.open = rcutorture_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rcudir;
static int __init rcutree_trace_init(void)
......@@ -318,6 +460,9 @@ static int __init rcutree_trace_init(void)
if (!retval)
goto free_out;
if (rcu_boost_trace_create_file(rcudir))
goto free_out;
retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
if (!retval)
goto free_out;
......@@ -331,6 +476,11 @@ static int __init rcutree_trace_init(void)
NULL, &rcu_pending_fops);
if (!retval)
goto free_out;
retval = debugfs_create_file("rcutorture", 0444, rcudir,
NULL, &rcutorture_fops);
if (!retval)
goto free_out;
return 0;
free_out:
debugfs_remove_recursive(rcudir);
......
......@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
char *softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
"TASKLET", "SCHED", "HRTIMER"
};
/*
......
......@@ -337,7 +337,7 @@ config DEBUG_OBJECTS_WORK
config DEBUG_OBJECTS_RCU_HEAD
bool "Debug RCU callbacks objects"
depends on DEBUG_OBJECTS && PREEMPT
depends on DEBUG_OBJECTS
help
Enable this to turn on debugging of RCU list heads (call_rcu() usage).
......@@ -875,22 +875,9 @@ config RCU_TORTURE_TEST_RUNNABLE
Say N here if you want the RCU torture tests to start only
after being manually enabled via /proc.
config RCU_CPU_STALL_DETECTOR
bool "Check for stalled CPUs delaying RCU grace periods"
depends on TREE_RCU || TREE_PREEMPT_RCU
default y
help
This option causes RCU to printk information on which
CPUs are delaying the current grace period, but only when
the grace period extends for excessive time periods.
Say N if you want to disable such checks.
Say Y if you are unsure.
config RCU_CPU_STALL_TIMEOUT
int "RCU CPU stall timeout in seconds"
depends on RCU_CPU_STALL_DETECTOR
depends on TREE_RCU || TREE_PREEMPT_RCU
range 3 300
default 60
help
......@@ -899,22 +886,9 @@ config RCU_CPU_STALL_TIMEOUT
RCU grace period persists, additional CPU stall warnings are
printed at more widely spaced intervals.
config RCU_CPU_STALL_DETECTOR_RUNNABLE
bool "RCU CPU stall checking starts automatically at boot"
depends on RCU_CPU_STALL_DETECTOR
default y
help
If set, start checking for RCU CPU stalls immediately on
boot. Otherwise, RCU CPU stall checking must be manually
enabled.
Say Y if you are unsure.
Say N if you wish to suppress RCU CPU stall checking during boot.
config RCU_CPU_STALL_VERBOSE
bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
depends on TREE_PREEMPT_RCU
default y
help
This option causes RCU to printk detailed per-task information
......
......@@ -2187,7 +2187,6 @@ static const struct flag flags[] = {
{ "TASKLET_SOFTIRQ", 6 },
{ "SCHED_SOFTIRQ", 7 },
{ "HRTIMER_SOFTIRQ", 8 },
{ "RCU_SOFTIRQ", 9 },
{ "HRTIMER_NORESTART", 0 },
{ "HRTIMER_RESTART", 1 },
......