Commit 648c630c authored by Paul E. McKenney

Merge branches 'doc.2015.12.05a', 'exp.2015.12.07a', 'fixes.2015.12.07a', 'list.2015.12.04b' and 'torture.2015.12.05a' into HEAD

doc.2015.12.05a:  Documentation updates
exp.2015.12.07a:  Expedited grace-period updates
fixes.2015.12.07a:  Miscellaneous fixes
list.2015.12.04b:  Linked-list updates
torture.2015.12.05a:  Torture-test updates
@@ -3296,18 +3296,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	rcutorture.verbose= [KNL]
 			Enable additional printk() statements.
 
+	rcupdate.rcu_cpu_stall_suppress= [KNL]
+			Suppress RCU CPU stall warning messages.
+
+	rcupdate.rcu_cpu_stall_timeout= [KNL]
+			Set timeout for RCU CPU stall warning messages.
+
 	rcupdate.rcu_expedited= [KNL]
 			Use expedited grace-period primitives, for
 			example, synchronize_rcu_expedited() instead
 			of synchronize_rcu().  This reduces latency,
 			but can increase CPU utilization, degrade
 			real-time latency, and degrade energy efficiency.
+			No effect on CONFIG_TINY_RCU kernels.
 
-	rcupdate.rcu_cpu_stall_suppress= [KNL]
-			Suppress RCU CPU stall warning messages.
+	rcupdate.rcu_normal= [KNL]
+			Use only normal grace-period primitives,
+			for example, synchronize_rcu() instead of
+			synchronize_rcu_expedited().  This improves
+			real-time latency, CPU utilization, and
+			energy efficiency, but can expose users to
+			increased grace-period latency.  This parameter
+			overrides rcupdate.rcu_expedited.  No effect on
+			CONFIG_TINY_RCU kernels.
 
-	rcupdate.rcu_cpu_stall_timeout= [KNL]
-			Set timeout for RCU CPU stall warning messages.
+	rcupdate.rcu_normal_after_boot= [KNL]
+			Once boot has completed (that is, after
+			rcu_end_inkernel_boot() has been invoked), use
+			only normal grace-period primitives.  No effect
+			on CONFIG_TINY_RCU kernels.
 
 	rcupdate.rcu_task_stall_timeout= [KNL]
 			Set timeout in jiffies for RCU task stall warning
...
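The two new rcupdate parameters above are designed to compose: a kernel using CONFIG_RCU_EXPEDITE_BOOT can keep fast expedited grace periods during boot yet be guaranteed normal grace periods afterwards. A hypothetical command line illustrating this combination (not taken from the commit itself):

	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1

As the documentation text notes, rcu_normal overrides rcu_expedited whenever both are in effect; the same knobs are also writable at run time through the sysfs attributes added by the KERNEL_ATTR_RW(rcu_normal) hunks further down.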
@@ -24,7 +24,7 @@
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
-	list->next = list;
+	WRITE_ONCE(list->next, list);
 	list->prev = list;
 }
@@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new,
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
-	prev->next = new;
+	WRITE_ONCE(prev->next, new);
 }
 #else
 extern void __list_add(struct list_head *new,
@@ -186,7 +186,7 @@ static inline int list_is_last(const struct list_head *list,
  */
 static inline int list_empty(const struct list_head *head)
 {
-	return head->next == head;
+	return READ_ONCE(head->next) == head;
 }
 
 /**
@@ -608,7 +608,7 @@ static inline int hlist_unhashed(const struct hlist_node *h)
 static inline int hlist_empty(const struct hlist_head *h)
 {
-	return !h->first;
+	return !READ_ONCE(h->first);
 }
 
 static inline void __hlist_del(struct hlist_node *n)
@@ -642,7 +642,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 	n->next = first;
 	if (first)
 		first->pprev = &n->next;
-	h->first = n;
+	WRITE_ONCE(h->first, n);
 	n->pprev = &h->first;
 }
@@ -653,14 +653,14 @@ static inline void hlist_add_before(struct hlist_node *n,
 	n->pprev = next->pprev;
 	n->next = next;
 	next->pprev = &n->next;
-	*(n->pprev) = n;
+	WRITE_ONCE(*(n->pprev), n);
 }
 
 static inline void hlist_add_behind(struct hlist_node *n,
 				    struct hlist_node *prev)
 {
 	n->next = prev->next;
-	prev->next = n;
+	WRITE_ONCE(prev->next, n);
 	n->pprev = &prev->next;
 
 	if (n->next)
...
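The READ_ONCE()/WRITE_ONCE() conversions above target callers that test a list while another CPU may concurrently be adding entries or reinitializing the header. A minimal sketch of the pattern being made well-defined (the queue and struct names are illustrative, not from this commit):

	struct item {
		struct list_head entry;
	};

	static LIST_HEAD(q_head);
	static DEFINE_SPINLOCK(q_lock);

	/* Producer: publishes under the lock. */
	static void produce(struct item *item)
	{
		spin_lock(&q_lock);
		list_add_tail(&item->entry, &q_head);	/* store now uses WRITE_ONCE() */
		spin_unlock(&q_lock);
	}

	/* Consumer: cheap lockless peek before committing to the lock. */
	static struct item *try_consume(void)
	{
		struct item *item = NULL;

		if (!list_empty(&q_head)) {		/* load now uses READ_ONCE() */
			spin_lock(&q_lock);
			if (!list_empty(&q_head)) {	/* must re-check under lock */
				item = list_first_entry(&q_head, struct item, entry);
				list_del(&item->entry);
			}
			spin_unlock(&q_lock);
		}
		return item;
	}

Without the _ONCE() accessors, the compiler is free to tear, fuse, or refetch the ->next load while the producer's store is in flight; with them, the lockless peek is merely racy, not undefined behavior.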
@@ -70,7 +70,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
 static inline int hlist_bl_empty(const struct hlist_bl_head *h)
 {
-	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+	return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
 }
 
 static inline void hlist_bl_add_head(struct hlist_bl_node *n,
...
@@ -57,7 +57,7 @@ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
 static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
 {
-	return is_a_nulls(h->first);
+	return is_a_nulls(READ_ONCE(h->first));
 }
 
 static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
...
@@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old,
 }
 
 /**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * __list_splice_init_rcu - join an RCU-protected list into an existing list.
  * @list: the RCU-protected list to splice
- * @head: the place in the list to splice the first list into
+ * @prev: points to the last element of the existing list
+ * @next: points to the first element of the existing list
  * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
  *
- * @head can be RCU-read traversed concurrently with this function.
+ * The list pointed to by @prev and @next can be RCU-read traversed
+ * concurrently with this function.
  *
  * Note that this function blocks.
  *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head.  In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created.  But only if -really-
- * needed -- there is no shortage of RCU API members.
+ * Important note: the caller must take whatever action is necessary to prevent
+ * any other updates to the existing list.  In principle, it is possible to
+ * modify the list as soon as sync() begins execution.  If this sort of thing
+ * becomes necessary, an alternative version based on call_rcu() could be
+ * created.  But only if -really- needed -- there is no shortage of RCU API
+ * members.
  */
-static inline void list_splice_init_rcu(struct list_head *list,
-					struct list_head *head,
-					void (*sync)(void))
+static inline void __list_splice_init_rcu(struct list_head *list,
+					  struct list_head *prev,
+					  struct list_head *next,
+					  void (*sync)(void))
 {
 	struct list_head *first = list->next;
 	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
-
-	if (list_empty(list))
-		return;
 
 	/*
 	 * "first" and "last" tracking list, so initialize it.  RCU readers
@@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	 * this function.
 	 */
 
-	last->next = at;
-	rcu_assign_pointer(list_next_rcu(head), first);
-	first->prev = head;
-	at->prev = last;
+	last->next = next;
+	rcu_assign_pointer(list_next_rcu(prev), first);
+	first->prev = prev;
+	next->prev = last;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list,
+ *                        designed for stacks.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the existing list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+					struct list_head *head,
+					void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head, head->next, sync);
+}
+
+/**
+ * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
+ *                             list, designed for queues.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the existing list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ */
+static inline void list_splice_tail_init_rcu(struct list_head *list,
+					     struct list_head *head,
+					     void (*sync)(void))
+{
+	if (!list_empty(list))
+		__list_splice_init_rcu(list, head->prev, head, sync);
 }
 
 /**
@@ -304,6 +333,42 @@ static inline void list_splice_init_rcu(struct list_head *list,
 		&pos->member != (head); \
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
+/**
+ * list_entry_lockless - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_entry_lockless(ptr, type, member) \
+	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+
+/**
+ * list_for_each_entry_lockless - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu(), but requires some implicit RCU
+ * read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where
+ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
+ * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
+ * example is when items are added to the list, but never deleted.
+ */
+#define list_for_each_entry_lockless(pos, head, member) \
+	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
+	     &pos->member != (head); \
+	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
+
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos: the type * to use as a loop cursor.
...
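A sketch of how the new lockless iterator above might be used; the struct and function names here are illustrative, not part of this commit. The reader runs in a context where preemption is disabled but rcu_read_lock() and lockdep are off-limits, so per the kerneldoc the update side must rely on RCU-sched:

	struct foo {
		int key;
		struct list_head list;		/* linked into a foo list */
	};

	/* Reader: preemption disabled, e.g. special exception context. */
	static struct foo *find_foo(struct list_head *head, int key)
	{
		struct foo *p;

		list_for_each_entry_lockless(p, head, list)
			if (p->key == key)
				return p;
		return NULL;
	}

	/* Updater: list_add_rcu() under a lock, then synchronize_sched()
	 * or call_rcu_sched() before freeing a removed element. */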
@@ -48,10 +48,17 @@
 
 #include <asm/barrier.h>
 
+#ifndef CONFIG_TINY_RCU
 extern int rcu_expedited; /* for sysctl */
+extern int rcu_normal;    /* also for sysctl */
+#endif /* #ifndef CONFIG_TINY_RCU */
 
 #ifdef CONFIG_TINY_RCU
 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
+{
+	return true;
+}
 static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
 {
 	return false;
@@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void)
 {
 }
 #else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
 void rcu_expedite_gp(void);
 void rcu_unexpedite_gp(void);
@@ -321,7 +329,6 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_end_inkernel_boot(void);
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
@@ -329,6 +336,12 @@ struct notifier_block;
 int rcu_cpu_notify(struct notifier_block *self,
 		   unsigned long action, void *hcpu);
 
+#ifndef CONFIG_TINY_RCU
+void rcu_end_inkernel_boot(void);
+#else /* #ifndef CONFIG_TINY_RCU */
+static inline void rcu_end_inkernel_boot(void) { }
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
@@ -379,9 +392,9 @@ static inline void rcu_init_nohz(void)
  */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_irq_enter(); \
+		rcu_irq_enter_irqson(); \
 		do { a; } while (0); \
-		rcu_irq_exit(); \
+		rcu_irq_exit_irqson(); \
 	} while (0)
 
 /*
@@ -741,7 +754,7 @@ static inline void rcu_preempt_sleep_check(void)
  * The tracing infrastructure traces RCU (we want that), but unfortunately
  * some of the RCU checks causes tracing to lock up the system.
  *
- * The tracing version of rcu_dereference_raw() must not call
+ * The no-tracing version of rcu_dereference_raw() must not call
  * rcu_read_lock_held().
  */
 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
...
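Because RCU_NONIDLE() now uses the _irqson variants, it saves and restores the interrupt state itself and so may be invoked whether or not interrupts are already disabled, which is what the tracepoint.h hunk below relies on. A minimal illustrative use from code running where RCU is not watching (the helper names are hypothetical):

	static void my_idle_helper(void)
	{
		rcu_idle_enter();		/* RCU stops watching this CPU. */
		RCU_NONIDLE(do_traced_work());	/* RCU watches just for this call. */
		rcu_idle_exit();
	}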
@@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void)
 {
 }
 
+static inline void rcu_irq_exit_irqson(void)
+{
+}
+
+static inline void rcu_irq_enter_irqson(void)
+{
+}
+
 static inline void rcu_irq_exit(void)
 {
 }
...
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
 /*
  * Note a virtualization-based context switch.  This is simply a
  * wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
  */
 static inline void rcu_virt_note_context_switch(int cpu)
 {
@@ -97,6 +97,8 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_enter_irqson(void);
+void rcu_irq_exit_irqson(void);
 
 void exit_rcu(void);
...
@@ -171,8 +171,8 @@ extern void syscall_unregfunc(void);
 				TP_PROTO(data_proto), \
 				TP_ARGS(data_args), \
 				TP_CONDITION(cond), \
-				rcu_irq_enter(), \
-				rcu_irq_exit()); \
+				rcu_irq_enter_irqson(), \
+				rcu_irq_exit_irqson()); \
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
...
@@ -943,6 +943,8 @@ static int __ref kernel_init(void *unused)
 
 	flush_delayed_fput();
 
+	rcu_end_inkernel_boot();
+
 	if (ramdisk_execute_command) {
 		ret = run_init_process(ramdisk_execute_command);
 		if (!ret)
...
@@ -20,7 +20,7 @@
 #include <linux/capability.h>
 #include <linux/compiler.h>
 
-#include <linux/rcupdate.h>	/* rcu_expedited */
+#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */
 
 #define KERNEL_ATTR_RO(_name) \
 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
@@ -144,11 +144,12 @@ static ssize_t fscaps_show(struct kobject *kobj,
 }
 KERNEL_ATTR_RO(fscaps);
 
+#ifndef CONFIG_TINY_RCU
 int rcu_expedited;
 static ssize_t rcu_expedited_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", rcu_expedited);
+	return sprintf(buf, "%d\n", READ_ONCE(rcu_expedited));
 }
 static ssize_t rcu_expedited_store(struct kobject *kobj,
 				   struct kobj_attribute *attr,
@@ -161,6 +162,24 @@ static ssize_t rcu_expedited_store(struct kobject *kobj,
 }
 KERNEL_ATTR_RW(rcu_expedited);
 
+int rcu_normal;
+static ssize_t rcu_normal_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", READ_ONCE(rcu_normal));
+}
+static ssize_t rcu_normal_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t count)
+{
+	if (kstrtoint(buf, 0, &rcu_normal))
+		return -EINVAL;
+	return count;
+}
+KERNEL_ATTR_RW(rcu_normal);
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 /*
  * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
  */
@@ -202,7 +221,10 @@ static struct attribute * kernel_attrs[] = {
 	&kexec_crash_size_attr.attr,
 	&vmcoreinfo_attr.attr,
 #endif
+#ifndef CONFIG_TINY_RCU
 	&rcu_expedited_attr.attr,
+	&rcu_normal_attr.attr,
+#endif
 	NULL
 };
...
@@ -162,6 +162,27 @@ static int rcu_torture_writer_state;
 #define RTWS_SYNC		7
 #define RTWS_STUTTER		8
 #define RTWS_STOPPING		9
+static const char * const rcu_torture_writer_state_names[] = {
+	"RTWS_FIXED_DELAY",
+	"RTWS_DELAY",
+	"RTWS_REPLACE",
+	"RTWS_DEF_FREE",
+	"RTWS_EXP_SYNC",
+	"RTWS_COND_GET",
+	"RTWS_COND_SYNC",
+	"RTWS_SYNC",
+	"RTWS_STUTTER",
+	"RTWS_STOPPING",
+};
+
+static const char *rcu_torture_writer_state_getname(void)
+{
+	unsigned int i = READ_ONCE(rcu_torture_writer_state);
+
+	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
+		return "???";
+	return rcu_torture_writer_state_names[i];
+}
 
 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
 #define RCUTORTURE_RUNNABLE_INIT 1
@@ -1307,7 +1328,8 @@ rcu_torture_stats_print(void)
 		rcutorture_get_gp_data(cur_ops->ttype,
 				       &flags, &gpnum, &completed);
-		pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
+		pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x\n",
+			 rcu_torture_writer_state_getname(),
 			 rcu_torture_writer_state,
 			 gpnum, completed, flags);
 		show_rcu_gp_kthreads();
...
@@ -489,7 +489,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  */
 void synchronize_srcu(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, rcu_gp_is_expedited()
+	__synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
 			       ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
 			       : SYNCHRONIZE_SRCU_TRYCOUNT);
 }
...
This diff is collapsed.
@@ -178,6 +178,8 @@ struct rcu_node {
 				/*  beginning of each expedited GP. */
 	unsigned long expmaskinitnext;
 				/* Online CPUs for next expedited GP. */
+				/*  Any CPU that has ever been online will */
+				/*  have its bit set. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 				/*  Only one bit will be set in this mask. */
 	int	grplo;		/* lowest-numbered CPU or group here. */
@@ -384,6 +386,10 @@ struct rcu_data {
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 	struct mutex exp_funnel_mutex;
+	atomic_long_t expedited_workdone0;	/* # done by others #0. */
+	atomic_long_t expedited_workdone1;	/* # done by others #1. */
+	atomic_long_t expedited_workdone2;	/* # done by others #2. */
+	atomic_long_t expedited_workdone3;	/* # done by others #3. */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -498,10 +504,6 @@ struct rcu_state {
 	/* End of fields guarded by barrier_mutex. */
 
 	unsigned long expedited_sequence;	/* Take a ticket. */
-	atomic_long_t expedited_workdone0;	/* # done by others #0. */
-	atomic_long_t expedited_workdone1;	/* # done by others #1. */
-	atomic_long_t expedited_workdone2;	/* # done by others #2. */
-	atomic_long_t expedited_workdone3;	/* # done by others #3. */
 	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
 	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
 	wait_queue_head_t expedited_wq;		/* Wait for check-ins. */
@@ -545,6 +547,18 @@
 #define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
 #define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */
 
+#ifndef RCU_TREE_NONCORE
+static const char * const gp_state_names[] = {
+	"RCU_GP_IDLE",
+	"RCU_GP_WAIT_GPS",
+	"RCU_GP_DONE_GPS",
+	"RCU_GP_WAIT_FQS",
+	"RCU_GP_DOING_FQS",
+	"RCU_GP_CLEANUP",
+	"RCU_GP_CLEANED",
+};
+#endif /* #ifndef RCU_TREE_NONCORE */
+
 extern struct list_head rcu_struct_flavors;
 
 /* Sequence through rcu_state structures for each RCU flavor. */
...
@@ -63,8 +63,7 @@ static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 
 /*
  * Check the RCU kernel configuration parameters and print informative
- * messages about anything out of the ordinary.  If you like #ifdef, you
- * will love this function.
+ * messages about anything out of the ordinary.
  */
 static void __init rcu_bootup_announce_oddness(void)
 {
@@ -147,8 +146,8 @@ static void __init rcu_bootup_announce(void)
  * the corresponding expedited grace period will also be the end of the
  * normal grace period.
  */
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
-				   unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+	__releases(rnp->lock) /* But leaves rrupts disabled. */
 {
 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock);
+	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP.  This expedited
@@ -251,7 +250,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
 	} else {
 		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
 	}
-	local_irq_restore(flags);
 }
 
 /*
@@ -286,12 +284,11 @@ static void rcu_preempt_qs(void)
  * predating the current grace period drain, in other words, until
  * rnp->gp_tasks becomes NULL.
  *
- * Caller must disable preemption.
+ * Caller must disable interrupts.
  */
 static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
-	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
@@ -301,7 +298,7 @@ static void rcu_preempt_note_context_switch(void)
 		/* Possibly blocking in an RCU read-side critical section. */
 		rdp = this_cpu_ptr(rcu_state_p->rda);
 		rnp = rdp->mynode;
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		raw_spin_lock_rcu_node(rnp);
 		t->rcu_read_unlock_special.b.blocked = true;
 		t->rcu_blocked_node = rnp;
@@ -317,7 +314,7 @@ static void rcu_preempt_note_context_switch(void)
 					 (rnp->qsmask & rdp->grpmask)
 					 ? rnp->gpnum
 					 : rnp->gpnum + 1);
-		rcu_preempt_ctxt_queue(rnp, rdp, flags);
+		rcu_preempt_ctxt_queue(rnp, rdp);
 	} else if (t->rcu_read_lock_nesting < 0 &&
 		   t->rcu_read_unlock_special.s) {
@@ -449,19 +446,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 		/*
 		 * Remove this task from the list it blocked on.  The task
-		 * now remains queued on the rcu_node corresponding to
-		 * the CPU it first blocked on, so the first attempt to
-		 * acquire the task's rcu_node's ->lock will succeed.
-		 * Keep the loop and add a WARN_ON() out of sheer paranoia.
+		 * now remains queued on the rcu_node corresponding to the
+		 * CPU it first blocked on, so there is no longer any need
+		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
 		 */
-		for (;;) {
-			rnp = t->rcu_blocked_node;
-			raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-			if (rnp == t->rcu_blocked_node)
-				break;
-			WARN_ON_ONCE(1);
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-		}
+		rnp = t->rcu_blocked_node;
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = sync_rcu_preempt_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -746,6 +737,12 @@ void synchronize_rcu_expedited(void)
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
 
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(call_rcu);
+		return;
+	}
+
 	s = rcu_exp_gp_seq_snap(rsp);
 	rnp_unlock = exp_funnel_lock(rsp, s);
@@ -786,7 +783,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
  */
 static void __init __rcu_init_preempt(void)
 {
-	rcu_init_one(rcu_state_p, rcu_data_p);
+	rcu_init_one(rcu_state_p);
 }
 
 /*
@@ -1520,7 +1517,8 @@ static void rcu_prepare_for_idle(void)
 	struct rcu_state *rsp;
 	int tne;
 
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
+	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
+	    rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1534,10 +1532,6 @@ static void rcu_prepare_for_idle(void)
 	if (!tne)
 		return;
 
-	/* If this is a no-CBs CPU, no callbacks, just return. */
-	if (rcu_is_nocb_cpu(smp_processor_id()))
-		return;
-
 	/*
 	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
...
 /*
- * Read-Copy Update tracing for classic implementation
+ * Read-Copy Update tracing for hierarchical implementation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,7 @@
  * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
+ * Author: Paul E. McKenney
  *
  * Papers: http://www.rdrop.com/users/paulmck/RCU
  *
@@ -33,9 +34,7 @@
 #include <linux/sched.h>
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
 #include <linux/completion.h>
-#include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -183,14 +182,20 @@ static const struct file_operations rcudata_fops = {
 static int show_rcuexp(struct seq_file *m, void *v)
 {
+	int cpu;
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
+	struct rcu_data *rdp;
+	unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
 
+	for_each_possible_cpu(cpu) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		s0 += atomic_long_read(&rdp->expedited_workdone0);
+		s1 += atomic_long_read(&rdp->expedited_workdone1);
+		s2 += atomic_long_read(&rdp->expedited_workdone2);
+		s3 += atomic_long_read(&rdp->expedited_workdone3);
+	}
 	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
-		   rsp->expedited_sequence,
-		   atomic_long_read(&rsp->expedited_workdone0),
-		   atomic_long_read(&rsp->expedited_workdone1),
-		   atomic_long_read(&rsp->expedited_workdone2),
-		   atomic_long_read(&rsp->expedited_workdone3),
+		   rsp->expedited_sequence, s0, s1, s2, s3,
 		   atomic_long_read(&rsp->expedited_normal),
 		   atomic_read(&rsp->expedited_need_qs),
 		   rsp->expedited_sequence / 2);
@@ -487,16 +492,4 @@ static int __init rcutree_trace_init(void)
 	debugfs_remove_recursive(rcudir);
 	return 1;
 }
-
-static void __exit rcutree_trace_cleanup(void)
-{
-	debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutree_trace_init);
-module_exit(rcutree_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutree_trace_init);
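The show_rcuexp() rework above reflects the movement of the expedited_workdone counters from the global rcu_state structure into per-CPU rcu_data (see the earlier struct hunks): updates become contention-free per-CPU operations, and the full cost is paid only on the rare debugfs read, which sums across CPUs. The same pattern in miniature, with illustrative names:

	static DEFINE_PER_CPU(atomic_long_t, my_stat);

	static void my_stat_inc(void)		/* fast path: local CPU only */
	{
		atomic_long_inc(this_cpu_ptr(&my_stat));
	}

	static unsigned long my_stat_sum(void)	/* slow path: sum all CPUs */
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += atomic_long_read(per_cpu_ptr(&my_stat, cpu));
		return sum;
	}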
@@ -60,7 +60,12 @@ MODULE_ALIAS("rcupdate");
 #endif
 #define MODULE_PARAM_PREFIX "rcupdate."
 
+#ifndef CONFIG_TINY_RCU
 module_param(rcu_expedited, int, 0);
+module_param(rcu_normal, int, 0);
+static int rcu_normal_after_boot;
+module_param(rcu_normal_after_boot, int, 0);
+#endif /* #ifndef CONFIG_TINY_RCU */
 
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
 /**
@@ -113,6 +118,17 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
 
 #ifndef CONFIG_TINY_RCU
 
+/*
+ * Should expedited grace-period primitives always fall back to their
+ * non-expedited counterparts?  Intended for use within RCU.  Note
+ * that if the user specifies both rcu_expedited and rcu_normal, then
+ * rcu_normal wins.
+ */
+bool rcu_gp_is_normal(void)
+{
+	return READ_ONCE(rcu_normal);
+}
+
 static atomic_t rcu_expedited_nesting =
 	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
@@ -157,8 +173,6 @@ void rcu_unexpedite_gp(void)
 }
 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
 
-#endif /* #ifndef CONFIG_TINY_RCU */
-
 /*
  * Inform RCU of the end of the in-kernel boot sequence.
  */
@@ -166,8 +180,12 @@
 {
 	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
 		rcu_unexpedite_gp();
+	if (rcu_normal_after_boot)
+		WRITE_ONCE(rcu_normal, 1);
 }
 
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_PREEMPT_RCU
 
 /*
...
@@ -3085,7 +3085,6 @@ static void __sched notrace __schedule(bool preempt)
 
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_note_context_switch();
 	prev = rq->curr;
 
 	/*
@@ -3104,13 +3103,16 @@
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	local_irq_disable();
+	rcu_note_context_switch();
+
 	/*
 	 * Make sure that signal_pending_state()->signal_pending() below
 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
 	 * done by the caller to avoid the race with signal_wake_up().
 	 */
 	smp_mb__before_spinlock();
-	raw_spin_lock_irq(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
...
@@ -37,7 +37,7 @@ void __list_add(struct list_head *new,
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
-	prev->next = new;
+	WRITE_ONCE(prev->next, new);
 }
 EXPORT_SYMBOL(__list_add);
...
@@ -38,8 +38,6 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-grace=120
-
 T=/tmp/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
 touch $T
@@ -152,7 +150,7 @@ fi
 qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
 
 # Generate architecture-specific and interaction-specific qemu arguments
-qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$builddir/console.log"`"
+qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$resdir/console.log"`"
 
 # Generate qemu -append arguments
 qemu_append="`identify_qemu_append "$QEMU"`"
@@ -168,7 +166,7 @@ then
 	touch $resdir/buildonly
 	exit 0
 fi
-echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
+echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
@@ -214,7 +212,7 @@ then
 		else
 			break
 		fi
-		if test $kruntime -ge $((seconds + grace))
+		if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
 		then
 			echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
 			kill -KILL $qemu_pid
@@ -224,6 +222,5 @@ then
 	done
 fi
 
-cp $builddir/console.log $resdir
 parse-torture.sh $resdir/console.log $title
 parse-console.sh $resdir/console.log $title
@@ -42,6 +42,7 @@ TORTURE_DEFCONFIG=defconfig
 TORTURE_BOOT_IMAGE=""
 TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
 TORTURE_KMAKE_ARG=""
+TORTURE_SHUTDOWN_GRACE=180
 TORTURE_SUITE=rcu
 resdir=""
 configs=""
@@ -149,6 +150,11 @@ do
 		resdir=$2
 		shift
 		;;
+	--shutdown-grace)
+		checkarg --shutdown-grace "(seconds)" "$#" "$2" '^[0-9]*$' '^error'
+		TORTURE_SHUTDOWN_GRACE=$2
+		shift
+		;;
 	--torture)
 		checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
 		TORTURE_SUITE=$2
@@ -266,6 +272,7 @@ TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG
 TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD
 TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE
 TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC
+TORTURE_SHUTDOWN_GRACE="$TORTURE_SHUTDOWN_GRACE"; export TORTURE_SHUTDOWN_GRACE
 TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
 if ! test -e $resdir
 then
@@ -307,10 +314,10 @@ awk < $T/cfgcpu.pack \
 }
 
 # Dump out the scripting required to run one test batch.
-function dump(first, pastlast)
+function dump(first, pastlast, batchnum)
 {
-	print "echo ----Start batch: `date`";
-	print "echo ----Start batch: `date` >> " rd "/log";
+	print "echo ----Start batch " batchnum ": `date`";
+	print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
 	jn=1
 	for (j = first; j < pastlast; j++) {
 		builddir=KVM "/b" jn
@@ -371,25 +378,28 @@ END {
 	njobs = i;
 	nc = ncpus;
 	first = 0;
+	batchnum = 1;
 
 	# Each pass through the following loop considers one test.
 	for (i = 0; i < njobs; i++) {
 		if (ncpus == 0) {
 			# Sequential test specified, each test its own batch.
-			dump(i, i + 1);
+			dump(i, i + 1, batchnum);
 			first = i;
+			batchnum++;
 		} else if (nc < cpus[i] && i != 0) {
 			# Out of CPUs, dump out a batch.
-			dump(first, i);
+			dump(first, i, batchnum);
 			first = i;
 			nc = ncpus;
+			batchnum++;
 		}
 		# Account for the CPUs needed by the current test.
 		nc -= cpus[i];
 	}
 	# Dump the last batch.
 	if (ncpus != 0)
-		dump(first, i);
+		dump(first, i, batchnum);
 }' >> $T/script

 cat << ___EOF___ >> $T/script
...
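The new --shutdown-grace option replaces the hard-coded grace=120 deleted from the test-run script earlier; the value travels via the exported TORTURE_SHUTDOWN_GRACE variable (default 180 seconds) and bounds how long a hung guest is tolerated past its scheduled runtime before being killed. A sample invocation using only flags visible in this diff:

	kvm.sh --torture rcu --shutdown-grace 300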
@@ -24,9 +24,6 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=/tmp/abat-chk-badness.sh.$$
-trap 'rm -f $T' 0
-
 file="$1"
 title="$2"
 
@@ -36,9 +33,41 @@ if grep -Pq '\x00' < $file
 then
 	print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
-if test -s $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+if test -s $1.diags
 then
 	print_warning Assertion failure in $file $title
-	cat $T
+	# cat $1.diags
+	summary=""
+	n_badness=`grep -c Badness $1`
+	if test "$n_badness" -ne 0
+	then
+		summary="$summary  Badness: $n_badness"
+	fi
+	n_warn=`grep -v 'Warning: unable to open an initial console' $1 | egrep -c 'WARNING:|Warn'`
+	if test "$n_warn" -ne 0
+	then
+		summary="$summary  Warnings: $n_warn"
+	fi
+	n_bugs=`egrep -c 'BUG|Oops:' $1`
+	if test "$n_bugs" -ne 0
+	then
+		summary="$summary  Bugs: $n_bugs"
+	fi
+	n_calltrace=`grep -c 'Call Trace:' $1`
+	if test "$n_calltrace" -ne 0
+	then
+		summary="$summary  Call Traces: $n_calltrace"
+	fi
+	n_lockdep=`grep -c =========== $1`
+	if test "$n_badness" -ne 0
+	then
+		summary="$summary  lockdep: $n_badness"
+	fi
+	n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|Stall ended before state dump start' $1`
+	if test "$n_stalls" -ne 0
+	then
+		summary="$summary  Stalls: $n_stalls"
+	fi
+	print_warning Summary: $summary
 fi
@@ -20,7 +20,6 @@ CONFIG_PROVE_RCU
 
 CONFIG_NO_HZ_FULL_SYSIDLE
 CONFIG_RCU_NOCB_CPU
-CONFIG_RCU_USER_QS
 
 	Meaningless for TINY_RCU.
...
@@ -72,10 +72,6 @@ CONFIG_RCU_TORTURE_TEST_RUNNABLE
 
 	Always used in KVM testing.
 
-CONFIG_RCU_USER_QS
-
-	Redundant with CONFIG_NO_HZ_FULL.
-
 CONFIG_PREEMPT_RCU
 CONFIG_TREE_RCU
...