Commit f5604f67 authored by Paul E. McKenney

Merge branch 'torture.2014.02.23a' into HEAD

torture.2014.02.23a: locktorture addition and rcutorture changes
parents 322efba5 73fa867e
/*
* Common functions for in-kernel torture tests.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2014
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
#ifndef __LINUX_TORTURE_H
#define __LINUX_TORTURE_H
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \
static type name = init; \
module_param(name, type, 0444); \
MODULE_PARM_DESC(name, msg);
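/*
 * For example (illustration only, not part of this header), the line
 *	torture_param(int, stutter, 5, "Number of jiffies to run/halt test");
 * expands to:
 *	static int stutter = 5;
 *	module_param(stutter, int, 0444);
 *	MODULE_PARM_DESC(stutter, "Number of jiffies to run/halt test");
 */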
#define TORTURE_FLAG "-torture:"
#define TOROUT_STRING(s) \
pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
#define VERBOSE_TOROUT_STRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
/* Definitions for online/offline exerciser. */
int torture_onoff_init(long ooholdoff, long oointerval);
char *torture_onoff_stats(char *page);
bool torture_onoff_failures(void);
/* Low-rider random number generator. */
struct torture_random_state {
unsigned long trs_state;
long trs_count;
};
#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
unsigned long torture_random(struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
void torture_shuffle_task_register(struct task_struct *tp);
int torture_shuffle_init(long shuffint);
/* Test auto-shutdown handling. */
void torture_shutdown_absorb(const char *title);
int torture_shutdown_init(int ssecs, void (*cleanup)(void));
/* Task stuttering, which forces load/no-load transitions. */
void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
void torture_init_begin(char *ttype, bool v, int *runnable);
void torture_init_end(void);
bool torture_cleanup(void);
bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
char *f, struct task_struct **tp);
void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_create_kthread(n, arg, tp) \
_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
"Failed to create " #n, &(tp))
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
#endif /* __LINUX_TORTURE_H */
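To make the intended use of this API concrete, here is a minimal sketch of a hypothetical torture module written against the declarations above. All of the "example" names are illustrative assumptions, not part of this commit; the real locktorture.c below follows the same skeleton. Note that torture_create_kthread() and torture_stop_kthread() stringify the kthread function name, so the "Creating example_kthread task" and failure messages come for free.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/torture.h>

torture_param(bool, verbose, true, "Enable verbose debugging printk()s");

static char *torture_type = "example";	/* Used by the TOROUT macros above. */
static int example_runnable = 1;
static struct task_struct *example_task;

static int example_kthread(void *arg)
{
	VERBOSE_TOROUT_STRING("example task started");
	do {
		schedule_timeout_uninterruptible(1);
		/* Exercise the facility under test here. */
		stutter_wait("example");
	} while (!torture_must_stop());
	torture_kthread_stopping("example");
	return 0;
}

static void example_cleanup(void)
{
	if (torture_cleanup())
		return;
	torture_stop_kthread(example_kthread, example_task);
}

static int __init example_init(void)
{
	int firsterr;

	torture_init_begin(torture_type, verbose, &example_runnable);
	firsterr = torture_create_kthread(example_kthread, NULL, example_task);
	if (firsterr)
		goto unwind;
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	example_cleanup();
	return firsterr;
}

module_init(example_init);
module_exit(example_cleanup);
MODULE_LICENSE("GPL");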
......@@ -93,6 +93,7 @@ obj-$(CONFIG_PADATA) += padata.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
obj-$(CONFIG_TORTURE_TEST) += torture.o
$(obj)/configs.o: $(obj)/config_data.h
......
......@@ -23,3 +23,4 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
/*
* Module-based torture test facility for locking
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (C) IBM Corporation, 2014
*
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
"Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
"Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
"Enable verbose debugging printk()s");
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
"Type of lock to torture (spin_lock, spin_lock_irq, ...)");
static atomic_t n_lock_torture_errors;
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static int nrealwriters_stress;
static bool lock_is_write_held;
struct lock_writer_stress_stats {
long n_write_lock_fail;
long n_write_lock_acquired;
};
static struct lock_writer_stress_stats *lwsa;
#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(locktorture_runnable, int, 0444);
MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");
/* Forward reference. */
static void lock_torture_cleanup(void);
/*
* Operations vector for selecting different types of tests.
*/
struct lock_torture_ops {
void (*init)(void);
int (*writelock)(void);
void (*write_delay)(struct torture_random_state *trsp);
void (*writeunlock)(void);
unsigned long flags;
const char *name;
};
static struct lock_torture_ops *cur_ops;
/*
* Definitions for lock torture testing.
*/
static int torture_lock_busted_write_lock(void)
{
return 0; /* BUGGY, do not use in real life!!! */
}
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
const unsigned long longdelay_us = 100;
/* We want a long delay occasionally to force massive contention. */
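/*
 * With, e.g., nrealwriters_stress == 8, the modulus below is
 * 8 * 2000 * 100 = 1,600,000, so only about one call in 1.6 million
 * takes the long mdelay() path.
 */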
if (!(torture_random(trsp) %
(nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
static void torture_lock_busted_write_unlock(void)
{
/* BUGGY, do not use in real life!!! */
}
static struct lock_torture_ops lock_busted_ops = {
.writelock = torture_lock_busted_write_lock,
.write_delay = torture_lock_busted_write_delay,
.writeunlock = torture_lock_busted_write_unlock,
.name = "lock_busted"
};
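/*
 * The "lock" above intentionally does nothing. Selecting it with
 * torture_type=lock_busted lets writer kthreads (with more than one
 * writer) observe overlapping critical sections, which should drive up
 * the Fail counts and produce the final "End of test: FAILURE" message,
 * confirming that the test itself can detect a broken lock.
 */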
static DEFINE_SPINLOCK(torture_spinlock);
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
spin_lock(&torture_spinlock);
return 0;
}
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_us = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
if (!(torture_random(trsp) %
(nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
spin_unlock(&torture_spinlock);
}
static struct lock_torture_ops spin_lock_ops = {
.writelock = torture_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_spin_lock_write_unlock,
.name = "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
unsigned long flags;
spin_lock_irqsave(&torture_spinlock, flags);
cur_ops->flags = flags;
return 0;
}
static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
.writelock = torture_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_lock_spin_write_unlock_irq,
.name = "spin_lock_irq"
};
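/*
 * Illustrative sketch only, not part of this commit: a sleeping lock
 * could be exercised by supplying one more ops vector, for example a
 * mutex-based one. This assumes <linux/mutex.h> and would also need an
 * entry in torture_ops[] in lock_torture_init() below.
 */
static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_write_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_write_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_write_lock,
	.write_delay	= torture_spin_lock_write_delay, /* Busy-wait delay is fine here too. */
	.writeunlock	= torture_mutex_write_unlock,
	.name		= "mutex_lock"
};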
/*
* Lock torture writer kthread. Repeatedly acquires and releases
* the lock, checking for duplicate acquisitions.
*/
static int lock_torture_writer(void *arg)
{
struct lock_writer_stress_stats *lwsp = arg;
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
set_user_nice(current, 19);
do {
schedule_timeout_uninterruptible(1);
cur_ops->writelock();
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_write_lock_fail++;
lock_is_write_held = 1;
lwsp->n_write_lock_acquired++;
cur_ops->write_delay(&rand);
lock_is_write_held = 0;
cur_ops->writeunlock();
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_writer");
return 0;
}
/*
* Create a lock-torture-statistics message in the specified buffer.
*/
static void lock_torture_printk(char *page)
{
bool fail = 0;
int i;
long max = 0;
long min = lwsa[0].n_write_lock_acquired;
long long sum = 0;
for (i = 0; i < nrealwriters_stress; i++) {
if (lwsa[i].n_write_lock_fail)
fail = true;
sum += lwsa[i].n_write_lock_acquired;
if (max < lwsa[i].n_write_lock_acquired)
max = lwsa[i].n_write_lock_acquired;
if (min > lwsa[i].n_write_lock_acquired)
min = lwsa[i].n_write_lock_acquired;
}
page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
page += sprintf(page,
"Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
sum, max, min, max / 2 > min ? "???" : "",
fail, fail ? "!!!" : "");
if (fail)
atomic_inc(&n_lock_torture_errors);
}
/*
* Print torture statistics. Caller must ensure that there is only one
* call to this function at a given time!!! This is normally accomplished
* by relying on the module system to only have one copy of the module
* loaded, and then by giving the lock_torture_stats kthread full control
* (or the init/cleanup functions when lock_torture_stats thread is not
* running).
*/
static void lock_torture_stats_print(void)
{
int size = nrealwriters_stress * 200 + 8192;
char *buf;
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
lock_torture_printk(buf);
pr_alert("%s", buf);
kfree(buf);
}
/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*
* No need to worry about fullstop here, since this one doesn't reference
* volatile state or register callbacks.
*/
static int lock_torture_stats(void *arg)
{
VERBOSE_TOROUT_STRING("lock_torture_stats task started");
do {
schedule_timeout_interruptible(stat_interval * HZ);
lock_torture_stats_print();
torture_shutdown_absorb("lock_torture_stats");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_stats");
return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, nrealwriters_stress, stat_interval, verbose,
shuffle_interval, stutter, shutdown_secs,
onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
{
int i;
if (torture_cleanup())
return;
if (writer_tasks) {
for (i = 0; i < nrealwriters_stress; i++)
torture_stop_kthread(lock_torture_writer,
writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
torture_stop_kthread(lock_torture_stats, stats_task);
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
if (atomic_read(&n_lock_torture_errors))
lock_torture_print_module_parms(cur_ops,
"End of test: FAILURE");
else if (torture_onoff_failures())
lock_torture_print_module_parms(cur_ops,
"End of test: LOCK_HOTPLUG");
else
lock_torture_print_module_parms(cur_ops,
"End of test: SUCCESS");
}
static int __init lock_torture_init(void)
{
int i;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
};
torture_init_begin(torture_type, verbose, &locktorture_runnable);
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cur_ops = torture_ops[i];
if (strcmp(torture_type, cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
pr_alert("lock-torture: invalid torture type: \"%s\"\n",
torture_type);
pr_alert("lock-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_alert(" %s", torture_ops[i]->name);
pr_alert("\n");
torture_init_end();
return -EINVAL;
}
if (cur_ops->init)
cur_ops->init(); /* no "goto unwind" prior to this point!!! */
if (nwriters_stress >= 0)
nrealwriters_stress = nwriters_stress;
else
nrealwriters_stress = 2 * num_online_cpus();
lock_torture_print_module_parms(cur_ops, "Start of test");
/* Initialize the statistics so that each run gets its own numbers. */
lock_is_write_held = 0;
lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
if (lwsa == NULL) {
VERBOSE_TOROUT_STRING("lwsa: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nrealwriters_stress; i++) {
lwsa[i].n_write_lock_fail = 0;
lwsa[i].n_write_lock_acquired = 0;
}
/* Start up the kthreads. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
onoff_interval * HZ);
if (firsterr)
goto unwind;
}
if (shuffle_interval > 0) {
firsterr = torture_shuffle_init(shuffle_interval);
if (firsterr)
goto unwind;
}
if (shutdown_secs > 0) {
firsterr = torture_shutdown_init(shutdown_secs,
lock_torture_cleanup);
if (firsterr)
goto unwind;
}
if (stutter > 0) {
firsterr = torture_stutter_init(stutter);
if (firsterr)
goto unwind;
}
writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nrealwriters_stress; i++) {
firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
writer_tasks[i]);
if (firsterr)
goto unwind;
}
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL,
stats_task);
if (firsterr)
goto unwind;
}
torture_init_end();
return 0;
unwind:
torture_init_end();
lock_torture_cleanup();
return firsterr;
}
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
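Assuming CONFIG_LOCK_TORTURE_TEST=m, so that the above builds as locktorture.ko, a typical run would look something like "modprobe locktorture torture_type=spin_lock_irq nwriters_stress=4" followed later by "rmmod locktorture"; the periodic statistics and the final "End of test: ..." line appear in the kernel log via pr_alert().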
obj-y += update.o srcu.o
obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += tree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
......
......@@ -48,110 +48,58 @@
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
MODULE_ALIAS("rcutorture");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutorture."
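/*
 * With this prefix, the parameters declared below are specified as
 * "rcutorture.<name>", for example "rcutorture.nreaders=8" on the
 * kernel boot command line when rcutorture is built into the kernel.
 */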
static int fqs_duration;
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
static int fqs_holdoff;
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
static int fqs_stutter = 3;
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
static bool gp_exp;
module_param(gp_exp, bool, 0444);
MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
static bool gp_normal;
module_param(gp_normal, bool, 0444);
MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
static int irqreader = 1;
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
static int n_barrier_cbs;
module_param(n_barrier_cbs, int, 0444);
MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
static int nfakewriters = 4;
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
static int nreaders = -1;
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
static int object_debug;
module_param(object_debug, int, 0444);
MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
static int onoff_holdoff;
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
static int onoff_interval;
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
static int shuffle_interval = 3;
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
static int shutdown_secs;
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
static int stall_cpu;
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
static int stall_cpu_holdoff = 10;
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
static int stat_interval = 60;
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
static int stutter = 5;
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
static int test_boost = 1;
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
static int test_boost_duration = 4;
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
static int test_boost_interval = 7;
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
static bool test_no_idle_hz = true;
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
torture_param(int, fqs_duration, 0,
"Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
"Use normal (non-expedited) GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
"# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
"Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
"Time to wait before starting stall (s).");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
"Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
"Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
"Test support for tickless idle CPUs");
torture_param(bool, verbose, true,
"Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
static bool verbose;
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
......@@ -170,10 +118,10 @@ static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
rcu_torture_count) = { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
rcu_torture_batch) = { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
......@@ -186,22 +134,9 @@ static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test;
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
......@@ -232,7 +167,6 @@ static u64 notrace rcu_trace_clock_local(void)
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long shutdown_time; /* jiffies to system shutdown. */
static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
......@@ -242,51 +176,6 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
/*
* Protect fullstop transitions and spawning of kthreads.
*/
static DEFINE_MUTEX(fullstop_mutex);
/* Forward reference. */
static void rcu_torture_cleanup(void);
/*
* Detect and respond to a system shutdown.
*/
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
mutex_lock(&fullstop_mutex);
if (fullstop == FULLSTOP_DONTSTOP)
fullstop = FULLSTOP_SHUTDOWN;
else
pr_warn(/* but going down anyway, so... */
"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
/*
* Absorb kthreads into a kernel function that won't return, so that
* they won't ever access module text or data again.
*/
static void rcutorture_shutdown_absorb(const char *title)
{
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
pr_notice(
"rcutorture thread %s parking due to system shutdown\n",
title);
schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
}
}
/*
* Allocate an element from the rcu_tortures pool.
*/
......@@ -320,44 +209,6 @@ rcu_torture_free(struct rcu_torture *p)
spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
unsigned long rrs_state;
long rrs_count;
};
#define RCU_RANDOM_MULT 39916801 /* prime */
#define RCU_RANDOM_ADD 479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000
#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
* Crude but fast random-number generator. Uses a linear congruential
* generator, with occasional help from cpu_clock().
*/
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
if (--rrsp->rrs_count < 0) {
rrsp->rrs_state += (unsigned long)local_clock();
rrsp->rrs_count = RCU_RANDOM_REFRESH;
}
rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
return swahw32(rrsp->rrs_state);
}
static void
rcu_stutter_wait(const char *title)
{
while (stutter_pause_test || !rcutorture_runnable) {
if (rcutorture_runnable)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
rcutorture_shutdown_absorb(title);
}
}
/*
* Operations vector for selecting different types of tests.
*/
......@@ -365,7 +216,7 @@ rcu_stutter_wait(const char *title)
struct rcu_torture_ops {
void (*init)(void);
int (*readlock)(void);
void (*read_delay)(struct rcu_random_state *rrsp);
void (*read_delay)(struct torture_random_state *rrsp);
void (*readunlock)(int idx);
int (*completed)(void);
void (*deferred_free)(struct rcu_torture *p);
......@@ -392,7 +243,7 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
return 0;
}
static void rcu_read_delay(struct rcu_random_state *rrsp)
static void rcu_read_delay(struct torture_random_state *rrsp)
{
const unsigned long shortdelay_us = 200;
const unsigned long longdelay_ms = 50;
......@@ -401,12 +252,13 @@ static void rcu_read_delay(struct rcu_random_state *rrsp)
* period, and we want a long delay occasionally to trigger
* force_quiescent_state. */
if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
if (!preempt_count() &&
!(torture_random(rrsp) % (nrealreaders * 20000)))
preempt_schedule(); /* No QS if preempt_disable() in effect */
#endif
}
......@@ -427,7 +279,7 @@ rcu_torture_cb(struct rcu_head *p)
int i;
struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
if (fullstop != FULLSTOP_DONTSTOP) {
if (torture_must_stop_irq()) {
/* Test is ending, just drop callbacks on the floor. */
/* The next initialization will pick up the pieces. */
return;
......@@ -519,6 +371,48 @@ static struct rcu_torture_ops rcu_bh_ops = {
.name = "rcu_bh"
};
/*
* Don't even think about trying any of these in real life!!!
* The names include "busted", and they really mean it!
* The only purpose of these functions is to provide a buggy RCU
* implementation to make sure that rcutorture correctly emits
* buggy-RCU error messages.
*/
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
/* This is a deliberate bug for testing purposes only! */
rcu_torture_cb(&p->rtort_rcu);
}
static void synchronize_rcu_busted(void)
{
/* This is a deliberate bug for testing purposes only! */
}
static void
call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
/* This is a deliberate bug for testing purposes only! */
func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_busted_torture_deferred_free,
.sync = synchronize_rcu_busted,
.exp_sync = synchronize_rcu_busted,
.call = call_rcu_busted,
.cb_barrier = NULL,
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
.name = "rcu_busted"
};
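/*
 * (Selected with torture_type=rcu_busted; running it should cause
 * rcutorture to report errors, confirming that the error paths work.)
 */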
/*
* Definitions for srcu torture testing.
*/
......@@ -530,7 +424,7 @@ static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
return srcu_read_lock(&srcu_ctl);
}
static void srcu_read_delay(struct rcu_random_state *rrsp)
static void srcu_read_delay(struct torture_random_state *rrsp)
{
long delay;
const long uspertick = 1000000 / HZ;
......@@ -538,7 +432,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
/* We want there to be long-running readers, but not all the time. */
delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
delay = torture_random(rrsp) %
(nrealreaders * 2 * longdelay * uspertick);
if (!delay)
schedule_timeout_interruptible(longdelay);
else
......@@ -677,12 +572,12 @@ static int rcu_torture_boost(void *arg)
struct rcu_boost_inflight rbi = { .inflight = 0 };
struct sched_param sp;
VERBOSE_PRINTK_STRING("rcu_torture_boost started");
VERBOSE_TOROUT_STRING("rcu_torture_boost started");
/* Set real-time priority. */
sp.sched_priority = 1;
if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
n_rcu_torture_boost_rterror++;
}
......@@ -693,9 +588,8 @@ static int rcu_torture_boost(void *arg)
oldstarttime = boost_starttime;
while (ULONG_CMP_LT(jiffies, oldstarttime)) {
schedule_timeout_interruptible(oldstarttime - jiffies);
rcu_stutter_wait("rcu_torture_boost");
if (kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP)
stutter_wait("rcu_torture_boost");
if (torture_must_stop())
goto checkwait;
}
......@@ -710,15 +604,14 @@ static int rcu_torture_boost(void *arg)
call_rcu(&rbi.rcu, rcu_torture_boost_cb);
if (jiffies - call_rcu_time >
test_boost_duration * HZ - HZ / 2) {
VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
n_rcu_torture_boost_failure++;
}
call_rcu_time = jiffies;
}
cond_resched();
rcu_stutter_wait("rcu_torture_boost");
if (kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP)
stutter_wait("rcu_torture_boost");
if (torture_must_stop())
goto checkwait;
}
......@@ -742,16 +635,17 @@ static int rcu_torture_boost(void *arg)
}
/* Go do the stutter. */
checkwait: rcu_stutter_wait("rcu_torture_boost");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
checkwait: stutter_wait("rcu_torture_boost");
} while (!torture_must_stop());
/* Clean up and exit. */
VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
rcutorture_shutdown_absorb("rcu_torture_boost");
while (!kthread_should_stop() || rbi.inflight)
while (!kthread_should_stop() || rbi.inflight) {
torture_shutdown_absorb("rcu_torture_boost");
schedule_timeout_uninterruptible(1);
}
smp_mb(); /* order accesses to ->inflight before stack-frame death. */
destroy_rcu_head_on_stack(&rbi.rcu);
torture_kthread_stopping("rcu_torture_boost");
return 0;
}
......@@ -766,7 +660,7 @@ rcu_torture_fqs(void *arg)
unsigned long fqs_resume_time;
int fqs_burst_remaining;
VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
do {
fqs_resume_time = jiffies + fqs_stutter * HZ;
while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
......@@ -780,12 +674,9 @@ rcu_torture_fqs(void *arg)
udelay(fqs_holdoff);
fqs_burst_remaining -= fqs_holdoff;
}
rcu_stutter_wait("rcu_torture_fqs");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
rcutorture_shutdown_absorb("rcu_torture_fqs");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
stutter_wait("rcu_torture_fqs");
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_fqs");
return 0;
}
......@@ -802,9 +693,9 @@ rcu_torture_writer(void *arg)
struct rcu_torture *rp;
struct rcu_torture *rp1;
struct rcu_torture *old_rp;
static DEFINE_RCU_RANDOM(rand);
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
set_user_nice(current, 19);
do {
......@@ -813,7 +704,7 @@ rcu_torture_writer(void *arg)
if (rp == NULL)
continue;
rp->rtort_pipe_count = 0;
udelay(rcu_random(&rand) & 0x3ff);
udelay(torture_random(&rand) & 0x3ff);
old_rp = rcu_dereference_check(rcu_torture_current,
current == writer_task);
rp->rtort_mbtest = 1;
......@@ -826,7 +717,7 @@ rcu_torture_writer(void *arg)
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
if (gp_normal == gp_exp)
exp = !!(rcu_random(&rand) & 0x80);
exp = !!(torture_random(&rand) & 0x80);
else
exp = gp_exp;
if (!exp) {
......@@ -852,12 +743,9 @@ rcu_torture_writer(void *arg)
}
}
rcutorture_record_progress(++rcu_torture_current_version);
rcu_stutter_wait("rcu_torture_writer");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
rcutorture_shutdown_absorb("rcu_torture_writer");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
stutter_wait("rcu_torture_writer");
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_writer");
return 0;
}
......@@ -868,19 +756,19 @@ rcu_torture_writer(void *arg)
static int
rcu_torture_fakewriter(void *arg)
{
DEFINE_RCU_RANDOM(rand);
DEFINE_TORTURE_RANDOM(rand);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
set_user_nice(current, 19);
do {
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
udelay(torture_random(&rand) & 0x3ff);
if (cur_ops->cb_barrier != NULL &&
rcu_random(&rand) % (nfakewriters * 8) == 0) {
torture_random(&rand) % (nfakewriters * 8) == 0) {
cur_ops->cb_barrier();
} else if (gp_normal == gp_exp) {
if (rcu_random(&rand) & 0x80)
if (torture_random(&rand) & 0x80)
cur_ops->sync();
else
cur_ops->exp_sync();
......@@ -889,13 +777,10 @@ rcu_torture_fakewriter(void *arg)
} else {
cur_ops->exp_sync();
}
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
stutter_wait("rcu_torture_fakewriter");
} while (!torture_must_stop());
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
rcutorture_shutdown_absorb("rcu_torture_fakewriter");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
torture_kthread_stopping("rcu_torture_fakewriter");
return 0;
}
......@@ -921,7 +806,7 @@ static void rcu_torture_timer(unsigned long unused)
int idx;
int completed;
int completed_end;
static DEFINE_RCU_RANDOM(rand);
static DEFINE_TORTURE_RANDOM(rand);
static DEFINE_SPINLOCK(rand_lock);
struct rcu_torture *p;
int pipe_count;
......@@ -980,13 +865,13 @@ rcu_torture_reader(void *arg)
int completed;
int completed_end;
int idx;
DEFINE_RCU_RANDOM(rand);
DEFINE_TORTURE_RANDOM(rand);
struct rcu_torture *p;
int pipe_count;
struct timer_list t;
unsigned long long ts;
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
set_user_nice(current, 19);
if (irqreader && cur_ops->irq_capable)
setup_timer_on_stack(&t, rcu_torture_timer, 0);
......@@ -1034,14 +919,11 @@ rcu_torture_reader(void *arg)
preempt_enable();
cur_ops->readunlock(idx);
schedule();
rcu_stutter_wait("rcu_torture_reader");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
rcutorture_shutdown_absorb("rcu_torture_reader");
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
if (irqreader && cur_ops->irq_capable)
del_timer_sync(&t);
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
torture_kthread_stopping("rcu_torture_reader");
return 0;
}
......@@ -1083,13 +965,7 @@ rcu_torture_printk(char *page)
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
n_rcu_torture_timers);
page += sprintf(page,
"onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
n_online_successes, n_online_attempts,
n_offline_successes, n_offline_attempts,
min_online, max_online,
min_offline, max_offline,
sum_online, sum_offline, HZ);
page = torture_onoff_stats(page);
page += sprintf(page, "barrier: %ld/%ld:%ld",
n_barrier_successes,
n_barrier_attempts,
......@@ -1150,123 +1026,17 @@ rcu_torture_stats_print(void)
/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*
* No need to worry about fullstop here, since this one doesn't reference
* volatile state or register callbacks.
*/
static int
rcu_torture_stats(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
do {
schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print();
rcutorture_shutdown_absorb("rcu_torture_stats");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0;
}
static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
* is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
*/
static void rcu_torture_shuffle_tasks(void)
{
int i;
cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
if (num_online_cpus() == 1) {
put_online_cpus();
return;
}
if (rcu_idle_cpu != -1)
cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
shuffle_tmp_mask);
}
if (writer_task)
set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (stutter_task)
set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
if (fqs_task)
set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
if (shutdown_task)
set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
#ifdef CONFIG_HOTPLUG_CPU
if (onoff_task)
set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
if (stall_task)
set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
if (barrier_cbs_tasks)
for (i = 0; i < n_barrier_cbs; i++)
if (barrier_cbs_tasks[i])
set_cpus_allowed_ptr(barrier_cbs_tasks[i],
shuffle_tmp_mask);
if (barrier_task)
set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
else
rcu_idle_cpu--;
put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
* system to become idle at a time and cut off its timer ticks. This is meant
* to test the support for such tickless idle CPU in RCU.
*/
static int
rcu_torture_shuffle(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
do {
schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks();
rcutorture_shutdown_absorb("rcu_torture_shuffle");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
* threads periodically.
*/
static int
rcu_torture_stutter(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
do {
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1;
if (!kthread_should_stop())
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0;
rcutorture_shutdown_absorb("rcu_torture_stutter");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
torture_shutdown_absorb("rcu_torture_stats");
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_stats");
return 0;
}
......@@ -1293,10 +1063,6 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
onoff_interval, onoff_holdoff);
}
static struct notifier_block rcutorture_shutdown_nb = {
.notifier_call = rcutorture_shutdown_notify,
};
static void rcutorture_booster_cleanup(int cpu)
{
struct task_struct *t;
......@@ -1304,14 +1070,12 @@ static void rcutorture_booster_cleanup(int cpu)
if (boost_tasks[cpu] == NULL)
return;
mutex_lock(&boost_mutex);
VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
t = boost_tasks[cpu];
boost_tasks[cpu] = NULL;
mutex_unlock(&boost_mutex);
/* This must be outside of the mutex, otherwise deadlock! */
kthread_stop(t);
boost_tasks[cpu] = NULL;
torture_stop_kthread(rcu_torture_boost, t);
}
static int rcutorture_booster_init(int cpu)
......@@ -1323,13 +1087,13 @@ static int rcutorture_booster_init(int cpu)
/* Don't allow time recalculation while creating a new task. */
mutex_lock(&boost_mutex);
VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
cpu_to_node(cpu),
"rcu_torture_boost");
if (IS_ERR(boost_tasks[cpu])) {
retval = PTR_ERR(boost_tasks[cpu]);
VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
n_rcu_torture_boost_ktrerror++;
boost_tasks[cpu] = NULL;
mutex_unlock(&boost_mutex);
......@@ -1341,175 +1105,6 @@ static int rcutorture_booster_init(int cpu)
return 0;
}
/*
* Cause the rcutorture test to shutdown the system after the test has
* run for the time specified by the shutdown_secs module parameter.
*/
static int
rcu_torture_shutdown(void *arg)
{
long delta;
unsigned long jiffies_snap;
VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
jiffies_snap = jiffies;
while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
!kthread_should_stop()) {
delta = shutdown_time - jiffies_snap;
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_shutdown task: %lu jiffies remaining\n",
torture_type, delta);
schedule_timeout_interruptible(delta);
jiffies_snap = jiffies;
}
if (kthread_should_stop()) {
VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
return 0;
}
/* OK, shut down the system. */
VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
shutdown_task = NULL; /* Avoid self-kill deadlock. */
rcu_torture_cleanup(); /* Get the success/failure message. */
kernel_power_off(); /* Shut down the system. */
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Execute random CPU-hotplug operations at the interval specified
* by the onoff_interval.
*/
static int
rcu_torture_onoff(void *arg)
{
int cpu;
unsigned long delta;
int maxcpu = -1;
DEFINE_RCU_RANDOM(rand);
int ret;
unsigned long starttime;
VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
if (onoff_holdoff > 0) {
VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
schedule_timeout_interruptible(onoff_holdoff * HZ);
VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
}
while (!kthread_should_stop()) {
cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: offlining %d\n",
torture_type, cpu);
starttime = jiffies;
n_offline_attempts++;
ret = cpu_down(cpu);
if (ret) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: offline %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: offlined %d\n",
torture_type, cpu);
n_offline_successes++;
delta = jiffies - starttime;
sum_offline += delta;
if (min_offline < 0) {
min_offline = delta;
max_offline = delta;
}
if (min_offline > delta)
min_offline = delta;
if (max_offline < delta)
max_offline = delta;
}
} else if (cpu_is_hotpluggable(cpu)) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: onlining %d\n",
torture_type, cpu);
starttime = jiffies;
n_online_attempts++;
ret = cpu_up(cpu);
if (ret) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: online %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: onlined %d\n",
torture_type, cpu);
n_online_successes++;
delta = jiffies - starttime;
sum_online += delta;
if (min_online < 0) {
min_online = delta;
max_online = delta;
}
if (min_online > delta)
min_online = delta;
if (max_online < delta)
max_online = delta;
}
}
schedule_timeout_interruptible(onoff_interval * HZ);
}
VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
return 0;
}
static int
rcu_torture_onoff_init(void)
{
int ret;
if (onoff_interval <= 0)
return 0;
onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
if (IS_ERR(onoff_task)) {
ret = PTR_ERR(onoff_task);
onoff_task = NULL;
return ret;
}
return 0;
}
static void rcu_torture_onoff_cleanup(void)
{
if (onoff_task == NULL)
return;
VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
kthread_stop(onoff_task);
onoff_task = NULL;
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
static int
rcu_torture_onoff_init(void)
{
return 0;
}
static void rcu_torture_onoff_cleanup(void)
{
}
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
/*
* CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
* induces a CPU stall for the time specified by stall_cpu.
......@@ -1518,11 +1113,11 @@ static int rcu_torture_stall(void *args)
{
unsigned long stop_at;
VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
if (stall_cpu_holdoff > 0) {
VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
}
if (!kthread_should_stop()) {
stop_at = get_seconds() + stall_cpu;
......@@ -1536,7 +1131,7 @@ static int rcu_torture_stall(void *args)
rcu_read_unlock();
pr_alert("rcu_torture_stall end.\n");
}
rcutorture_shutdown_absorb("rcu_torture_stall");
torture_shutdown_absorb("rcu_torture_stall");
while (!kthread_should_stop())
schedule_timeout_interruptible(10 * HZ);
return 0;
......@@ -1545,27 +1140,9 @@ static int rcu_torture_stall(void *args)
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
int ret;
if (stall_cpu <= 0)
return 0;
stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
if (IS_ERR(stall_task)) {
ret = PTR_ERR(stall_task);
stall_task = NULL;
return ret;
}
return 0;
}
/* Clean up after the CPU-stall kthread, if one was spawned. */
static void rcu_torture_stall_cleanup(void)
{
if (stall_task == NULL)
return;
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task.");
kthread_stop(stall_task);
stall_task = NULL;
return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* Callback function for RCU barrier testing. */
......@@ -1583,28 +1160,24 @@ static int rcu_torture_barrier_cbs(void *arg)
struct rcu_head rcu;
init_rcu_head_on_stack(&rcu);
VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started");
VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
set_user_nice(current, 19);
do {
wait_event(barrier_cbs_wq[myid],
(newphase =
ACCESS_ONCE(barrier_phase)) != lastphase ||
kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP);
torture_must_stop());
lastphase = newphase;
smp_mb(); /* ensure barrier_phase load before ->call(). */
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
if (torture_must_stop())
break;
cur_ops->call(&rcu, rcu_torture_barrier_cbf);
if (atomic_dec_and_test(&barrier_cbs_count))
wake_up(&barrier_wq);
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping");
rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
while (!kthread_should_stop())
schedule_timeout_interruptible(1);
} while (!torture_must_stop());
cur_ops->cb_barrier();
destroy_rcu_head_on_stack(&rcu);
torture_kthread_stopping("rcu_torture_barrier_cbs");
return 0;
}
......@@ -1613,7 +1186,7 @@ static int rcu_torture_barrier(void *arg)
{
int i;
VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting");
VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
do {
atomic_set(&barrier_cbs_invoked, 0);
atomic_set(&barrier_cbs_count, n_barrier_cbs);
......@@ -1623,9 +1196,8 @@ static int rcu_torture_barrier(void *arg)
wake_up(&barrier_cbs_wq[i]);
wait_event(barrier_wq,
atomic_read(&barrier_cbs_count) == 0 ||
kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP);
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
torture_must_stop());
if (torture_must_stop())
break;
n_barrier_attempts++;
cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
......@@ -1635,11 +1207,8 @@ static int rcu_torture_barrier(void *arg)
}
n_barrier_successes++;
schedule_timeout_interruptible(HZ / 10);
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
rcutorture_shutdown_absorb("rcu_torture_barrier");
while (!kthread_should_stop())
schedule_timeout_interruptible(1);
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_barrier");
return 0;
}
......@@ -1672,24 +1241,13 @@ static int rcu_torture_barrier_init(void)
return -ENOMEM;
for (i = 0; i < n_barrier_cbs; i++) {
init_waitqueue_head(&barrier_cbs_wq[i]);
barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
(void *)(long)i,
"rcu_torture_barrier_cbs");
if (IS_ERR(barrier_cbs_tasks[i])) {
ret = PTR_ERR(barrier_cbs_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
barrier_cbs_tasks[i] = NULL;
ret = torture_create_kthread(rcu_torture_barrier_cbs,
(void *)(long)i,
barrier_cbs_tasks[i]);
if (ret)
return ret;
}
}
barrier_task = kthread_run(rcu_torture_barrier, NULL,
"rcu_torture_barrier");
if (IS_ERR(barrier_task)) {
ret = PTR_ERR(barrier_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier");
barrier_task = NULL;
}
return 0;
return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
/* Clean up after RCU barrier testing. */
......@@ -1697,19 +1255,11 @@ static void rcu_torture_barrier_cleanup(void)
{
int i;
if (barrier_task != NULL) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task");
kthread_stop(barrier_task);
barrier_task = NULL;
}
torture_stop_kthread(rcu_torture_barrier, barrier_task);
if (barrier_cbs_tasks != NULL) {
for (i = 0; i < n_barrier_cbs; i++) {
if (barrier_cbs_tasks[i] != NULL) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task");
kthread_stop(barrier_cbs_tasks[i]);
barrier_cbs_tasks[i] = NULL;
}
}
for (i = 0; i < n_barrier_cbs; i++)
torture_stop_kthread(rcu_torture_barrier_cbs,
barrier_cbs_tasks[i]);
kfree(barrier_cbs_tasks);
barrier_cbs_tasks = NULL;
}
......@@ -1747,90 +1297,42 @@ rcu_torture_cleanup(void)
{
int i;
mutex_lock(&fullstop_mutex);
rcutorture_record_test_transition();
if (fullstop == FULLSTOP_SHUTDOWN) {
pr_warn(/* but going down anyway, so... */
"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
schedule_timeout_uninterruptible(10);
if (torture_cleanup()) {
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
return;
}
fullstop = FULLSTOP_RMMOD;
mutex_unlock(&fullstop_mutex);
unregister_reboot_notifier(&rcutorture_shutdown_nb);
rcu_torture_barrier_cleanup();
rcu_torture_stall_cleanup();
if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task);
}
stutter_task = NULL;
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
if (writer_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
kthread_stop(writer_task);
}
writer_task = NULL;
rcu_torture_barrier_cleanup();
torture_stop_kthread(rcu_torture_stall, stall_task);
torture_stop_kthread(rcu_torture_writer, writer_task);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++) {
if (reader_tasks[i]) {
VERBOSE_PRINTK_STRING(
"Stopping rcu_torture_reader task");
kthread_stop(reader_tasks[i]);
}
reader_tasks[i] = NULL;
}
for (i = 0; i < nrealreaders; i++)
torture_stop_kthread(rcu_torture_reader,
reader_tasks[i]);
kfree(reader_tasks);
reader_tasks = NULL;
}
rcu_torture_current = NULL;
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++) {
if (fakewriter_tasks[i]) {
VERBOSE_PRINTK_STRING(
"Stopping rcu_torture_fakewriter task");
kthread_stop(fakewriter_tasks[i]);
}
fakewriter_tasks[i] = NULL;
torture_stop_kthread(rcu_torture_fakewriter,
fakewriter_tasks[i]);
}
kfree(fakewriter_tasks);
fakewriter_tasks = NULL;
}
if (stats_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
kthread_stop(stats_task);
}
stats_task = NULL;
if (fqs_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
kthread_stop(fqs_task);
}
fqs_task = NULL;
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
if ((test_boost == 1 && cur_ops->can_boost) ||
test_boost == 2) {
unregister_cpu_notifier(&rcutorture_cpu_nb);
for_each_possible_cpu(i)
rcutorture_booster_cleanup(i);
}
if (shutdown_task != NULL) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
kthread_stop(shutdown_task);
}
shutdown_task = NULL;
rcu_torture_onoff_cleanup();
/* Wait for all RCU callbacks to fire. */
......@@ -1841,8 +1343,7 @@ rcu_torture_cleanup(void)
if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
else if (n_online_successes != n_online_attempts ||
n_offline_successes != n_offline_attempts)
else if (torture_onoff_failures())
rcu_torture_print_module_parms(cur_ops,
"End of test: RCU_HOTPLUG");
else
......@@ -1911,12 +1412,11 @@ rcu_torture_init(void)
int i;
int cpu;
int firsterr = 0;
int retval;
static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
};
mutex_lock(&fullstop_mutex);
torture_init_begin(torture_type, verbose, &rcutorture_runnable);
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
......@@ -1931,7 +1431,7 @@ rcu_torture_init(void)
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_alert(" %s", torture_ops[i]->name);
pr_alert("\n");
mutex_unlock(&fullstop_mutex);
torture_init_end();
return -EINVAL;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
......@@ -1946,7 +1446,6 @@ rcu_torture_init(void)
else
nrealreaders = 2 * num_online_cpus();
rcu_torture_print_module_parms(cur_ops, "Start of test");
fullstop = FULLSTOP_DONTSTOP;
/* Set up the freelist. */
......@@ -1982,108 +1481,61 @@ rcu_torture_init(void)
/* Start up the kthreads. */
VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
writer_task = kthread_create(rcu_torture_writer, NULL,
"rcu_torture_writer");
if (IS_ERR(writer_task)) {
firsterr = PTR_ERR(writer_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
writer_task = NULL;
firsterr = torture_create_kthread(rcu_torture_writer, NULL,
writer_task);
if (firsterr)
goto unwind;
}
wake_up_process(writer_task);
fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
GFP_KERNEL);
if (fakewriter_tasks == NULL) {
VERBOSE_PRINTK_ERRSTRING("out of memory");
VERBOSE_TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nfakewriters; i++) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
"rcu_torture_fakewriter");
if (IS_ERR(fakewriter_tasks[i])) {
firsterr = PTR_ERR(fakewriter_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
fakewriter_tasks[i] = NULL;
firsterr = torture_create_kthread(rcu_torture_fakewriter,
NULL, fakewriter_tasks[i]);
if (firsterr)
goto unwind;
}
}
reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_PRINTK_ERRSTRING("out of memory");
VERBOSE_TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nrealreaders; i++) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
"rcu_torture_reader");
if (IS_ERR(reader_tasks[i])) {
firsterr = PTR_ERR(reader_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
reader_tasks[i] = NULL;
firsterr = torture_create_kthread(rcu_torture_reader, NULL,
reader_tasks[i]);
if (firsterr)
goto unwind;
}
}
if (stat_interval > 0) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
stats_task = kthread_run(rcu_torture_stats, NULL,
"rcu_torture_stats");
if (IS_ERR(stats_task)) {
firsterr = PTR_ERR(stats_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
stats_task = NULL;
firsterr = torture_create_kthread(rcu_torture_stats, NULL,
stats_task);
if (firsterr)
goto unwind;
}
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
firsterr = -ENOMEM;
VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
goto unwind;
}
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
firsterr = torture_shuffle_init(shuffle_interval * HZ);
if (firsterr)
goto unwind;
}
}
if (stutter < 0)
stutter = 0;
if (stutter) {
/* Create the stutter thread */
stutter_task = kthread_run(rcu_torture_stutter, NULL,
"rcu_torture_stutter");
if (IS_ERR(stutter_task)) {
firsterr = PTR_ERR(stutter_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
stutter_task = NULL;
firsterr = torture_stutter_init(stutter * HZ);
if (firsterr)
goto unwind;
}
}
if (fqs_duration < 0)
fqs_duration = 0;
if (fqs_duration) {
/* Create the stutter thread */
fqs_task = kthread_run(rcu_torture_fqs, NULL,
"rcu_torture_fqs");
if (IS_ERR(fqs_task)) {
firsterr = PTR_ERR(fqs_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
fqs_task = NULL;
/* Create the fqs thread */
torture_create_kthread(rcu_torture_fqs, NULL, fqs_task);
if (firsterr)
goto unwind;
}
}
if (test_boost_interval < 1)
test_boost_interval = 1;
......@@ -2097,49 +1549,31 @@ rcu_torture_init(void)
for_each_possible_cpu(i) {
if (cpu_is_offline(i))
continue; /* Heuristic: CPU can go offline. */
retval = rcutorture_booster_init(i);
if (retval < 0) {
firsterr = retval;
firsterr = rcutorture_booster_init(i);
if (firsterr)
goto unwind;
}
}
}
if (shutdown_secs > 0) {
shutdown_time = jiffies + shutdown_secs * HZ;
shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
"rcu_torture_shutdown");
if (IS_ERR(shutdown_task)) {
firsterr = PTR_ERR(shutdown_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
shutdown_task = NULL;
goto unwind;
}
wake_up_process(shutdown_task);
}
i = rcu_torture_onoff_init();
if (i != 0) {
firsterr = i;
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
if (firsterr)
goto unwind;
}
register_reboot_notifier(&rcutorture_shutdown_nb);
i = rcu_torture_stall_init();
if (i != 0) {
firsterr = i;
firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
if (firsterr)
goto unwind;
}
retval = rcu_torture_barrier_init();
if (retval != 0) {
firsterr = retval;
firsterr = rcu_torture_stall_init();
if (firsterr)
goto unwind;
firsterr = rcu_torture_barrier_init();
if (firsterr)
goto unwind;
}
if (object_debug)
rcu_test_debug_objects();
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
torture_init_end();
return 0;
unwind:
mutex_unlock(&fullstop_mutex);
torture_init_end();
rcu_torture_cleanup();
return firsterr;
}
......
/*
* Common functions for in-kernel torture tests.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright (C) IBM Corporation, 2014
*
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
static char *torture_type;
static bool verbose;
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */
#define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
static int *torture_runnable;
#ifdef CONFIG_HOTPLUG_CPU
/*
* Variables for online-offline handling. Only present if CPU hotplug
* is enabled, otherwise does nothing.
*/
static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
/*
* Execute random CPU-hotplug operations at the interval specified
* by the onoff_interval.
*/
static int
torture_onoff(void *arg)
{
int cpu;
unsigned long delta;
int maxcpu = -1;
DEFINE_TORTURE_RANDOM(rand);
int ret;
unsigned long starttime;
VERBOSE_TOROUT_STRING("torture_onoff task started");
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
if (onoff_holdoff > 0) {
VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
schedule_timeout_interruptible(onoff_holdoff);
VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
}
while (!torture_must_stop()) {
cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlining %d\n",
torture_type, cpu);
starttime = jiffies;
n_offline_attempts++;
ret = cpu_down(cpu);
if (ret) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offline %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlined %d\n",
torture_type, cpu);
n_offline_successes++;
delta = jiffies - starttime;
sum_offline += delta;
if (min_offline < 0) {
min_offline = delta;
max_offline = delta;
}
if (min_offline > delta)
min_offline = delta;
if (max_offline < delta)
max_offline = delta;
}
} else if (cpu_is_hotpluggable(cpu)) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlining %d\n",
torture_type, cpu);
starttime = jiffies;
n_online_attempts++;
ret = cpu_up(cpu);
if (ret) {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: online %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlined %d\n",
torture_type, cpu);
n_online_successes++;
delta = jiffies - starttime;
sum_online += delta;
if (min_online < 0) {
min_online = delta;
max_online = delta;
}
if (min_online > delta)
min_online = delta;
if (max_online < delta)
max_online = delta;
}
}
schedule_timeout_interruptible(onoff_interval);
}
torture_kthread_stopping("torture_onoff");
return 0;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
* Initiate online-offline handling.
*/
int torture_onoff_init(long ooholdoff, long oointerval)
{
int ret = 0;
#ifdef CONFIG_HOTPLUG_CPU
onoff_holdoff = ooholdoff;
onoff_interval = oointerval;
if (onoff_interval <= 0)
return 0;
ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
return ret;
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
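As a usage sketch only (not part of this patch): both arguments to torture_onoff_init() are in jiffies, so a client exposing second-valued module parameters scales them by HZ, just as rcu_torture_init() does above. The example_onoff_setup() name and its parameters are illustrative assumptions.
/* Hypothetical client-init fragment: torture_onoff_init() wants jiffies. */
static int example_onoff_setup(long holdoff_secs, long interval_secs)
{
	return torture_onoff_init(holdoff_secs * HZ, interval_secs * HZ);
}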
/*
* Clean up after online/offline testing.
*/
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
if (onoff_task == NULL)
return;
VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
kthread_stop(onoff_task);
onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
/*
* Print online/offline testing statistics.
*/
char *torture_onoff_stats(char *page)
{
#ifdef CONFIG_HOTPLUG_CPU
page += sprintf(page,
"onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
n_online_successes, n_online_attempts,
n_offline_successes, n_offline_attempts,
min_online, max_online,
min_offline, max_offline,
sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
return page;
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);
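Another usage sketch with illustrative names (not part of this patch): torture_onoff_stats() returns the advanced buffer pointer, so a client can chain it into its own statistics output.
/* Hypothetical caller: append hotplug statistics to a stats page. */
static void example_print_stats(char *page)
{
	page += sprintf(page, "example-torture: ");
	page = torture_onoff_stats(page);	/* returns the advanced pointer */
	sprintf(page, "\n");
}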
/*
* Were all the online/offline operations successful?
*/
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
return n_online_successes != n_online_attempts ||
n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);
#define TORTURE_RANDOM_MULT 39916801 /* prime */
#define TORTURE_RANDOM_ADD 479001701 /* prime */
#define TORTURE_RANDOM_REFRESH 10000
/*
* Crude but fast random-number generator. Uses a linear congruential
* generator, with occasional help from cpu_clock().
*/
unsigned long
torture_random(struct torture_random_state *trsp)
{
if (--trsp->trs_count < 0) {
trsp->trs_state += (unsigned long)local_clock();
trsp->trs_count = TORTURE_RANDOM_REFRESH;
}
trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
TORTURE_RANDOM_ADD;
return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
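For illustration only, a caller might keep its own generator state via DEFINE_TORTURE_RANDOM() and derive a bounded value the way torture_onoff() does above; pick_random_cpu() and example_rand are hypothetical names, not part of this patch.
/* Hypothetical helper: choose a CPU number in the range [0, maxcpu]. */
static DEFINE_TORTURE_RANDOM(example_rand);

static int pick_random_cpu(int maxcpu)
{
	/* Shift out the low-order bits, which are the least random. */
	return (torture_random(&example_rand) >> 4) % (maxcpu + 1);
}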
/*
* Variables for shuffling. The idea is to ensure that each CPU stays
* idle for an extended period to test interactions with dyntick idle,
* as well as interactions with any per-CPU variables.
*/
struct shuffle_task {
struct list_head st_l;
struct task_struct *st_t;
};
static long shuffle_interval; /* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu; /* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);
/*
* Register a task to be shuffled. If there is no memory, just splat
* and don't bother registering.
*/
void torture_shuffle_task_register(struct task_struct *tp)
{
struct shuffle_task *stp;
if (WARN_ON_ONCE(tp == NULL))
return;
stp = kmalloc(sizeof(*stp), GFP_KERNEL);
if (WARN_ON_ONCE(stp == NULL))
return;
stp->st_t = tp;
mutex_lock(&shuffle_task_mutex);
list_add(&stp->st_l, &shuffle_task_list);
mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
/*
* Unregister all tasks, for example, at the end of the torture run.
*/
static void torture_shuffle_task_unregister_all(void)
{
struct shuffle_task *stp;
struct shuffle_task *p;
mutex_lock(&shuffle_task_mutex);
list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
list_del(&stp->st_l);
kfree(stp);
}
mutex_unlock(&shuffle_task_mutex);
}
/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
* A special case is when shuffle_idle_cpu = -1, in which case we allow
* the tasks to run on all CPUs.
*/
static void torture_shuffle_tasks(void)
{
struct shuffle_task *stp;
cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
if (num_online_cpus() == 1) {
put_online_cpus();
return;
}
/* Advance to the next CPU. Upon overflow, don't idle any CPUs. */
shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
if (shuffle_idle_cpu >= nr_cpu_ids)
shuffle_idle_cpu = -1;
if (shuffle_idle_cpu != -1) {
cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
if (cpumask_empty(shuffle_tmp_mask)) {
put_online_cpus();
return;
}
}
mutex_lock(&shuffle_task_mutex);
list_for_each_entry(stp, &shuffle_task_list, st_l)
set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
mutex_unlock(&shuffle_task_mutex);
put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
* system to become idle at a time and cut off its timer ticks. This is meant
* to test the support for such tickless idle CPU in RCU.
*/
static int torture_shuffle(void *arg)
{
VERBOSE_TOROUT_STRING("torture_shuffle task started");
do {
schedule_timeout_interruptible(shuffle_interval);
torture_shuffle_tasks();
torture_shutdown_absorb("torture_shuffle");
} while (!torture_must_stop());
torture_kthread_stopping("torture_shuffle");
return 0;
}
/*
* Start the shuffler, with shuffint in jiffies.
*/
int torture_shuffle_init(long shuffint)
{
shuffle_interval = shuffint;
shuffle_idle_cpu = -1;
if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
return -ENOMEM;
}
/* Create the shuffler thread */
return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
/*
* Stop the shuffling.
*/
static void torture_shuffle_cleanup(void)
{
torture_shuffle_task_unregister_all();
if (shuffler_task) {
VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
kthread_stop(shuffler_task);
free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
}
EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
/*
* Variables for auto-shutdown. This allows "lights out" torture runs
* to be fully scripted.
*/
static int shutdown_secs; /* desired test duration in seconds. */
static struct task_struct *shutdown_task;
static unsigned long shutdown_time; /* jiffies to system shutdown. */
static void (*torture_shutdown_hook)(void);
/*
* Absorb kthreads into a kernel function that won't return, so that
* they won't ever access module text or data again.
*/
void torture_shutdown_absorb(const char *title)
{
while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
pr_notice("torture thread %s parking due to system shutdown\n",
title);
schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);
/*
* Cause the torture test to shutdown the system after the test has
* run for the time specified by the shutdown_secs parameter.
*/
static int torture_shutdown(void *arg)
{
long delta;
unsigned long jiffies_snap;
VERBOSE_TOROUT_STRING("torture_shutdown task started");
jiffies_snap = jiffies;
while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
!torture_must_stop()) {
delta = shutdown_time - jiffies_snap;
if (verbose)
pr_alert("%s" TORTURE_FLAG
"torture_shutdown task: %lu jiffies remaining\n",
torture_type, delta);
schedule_timeout_interruptible(delta);
jiffies_snap = jiffies;
}
if (torture_must_stop()) {
torture_kthread_stopping("torture_shutdown");
return 0;
}
/* OK, shut down the system. */
VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
shutdown_task = NULL; /* Avoid self-kill deadlock. */
if (torture_shutdown_hook)
torture_shutdown_hook();
else
VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
kernel_power_off(); /* Shut down the system. */
return 0;
}
/*
* Start up the shutdown task.
*/
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
int ret = 0;
shutdown_secs = ssecs;
torture_shutdown_hook = cleanup;
if (shutdown_secs > 0) {
shutdown_time = jiffies + shutdown_secs * HZ;
ret = torture_create_kthread(torture_shutdown, NULL,
shutdown_task);
}
return ret;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
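A brief usage sketch with hypothetical names: unlike the jiffies-based initializers elsewhere in this file, torture_shutdown_init() takes seconds and scales by HZ itself, and a zero value simply leaves auto-shutdown disabled.
/* Hypothetical client-init fragment: test_secs is in seconds, not jiffies. */
static int example_arm_shutdown(int test_secs, void (*cleanup)(void))
{
	return torture_shutdown_init(test_secs, cleanup);
}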
/*
* Detect and respond to a system shutdown.
*/
static int torture_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
mutex_lock(&fullstop_mutex);
if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
} else {
pr_warn("Concurrent rmmod and shutdown illegal!\n");
}
mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
static struct notifier_block torture_shutdown_nb = {
.notifier_call = torture_shutdown_notify,
};
/*
* Shut down the shutdown task. Say what??? Heh! This can happen if
* the torture module gets an rmmod before the shutdown time arrives. ;-)
*/
static void torture_shutdown_cleanup(void)
{
unregister_reboot_notifier(&torture_shutdown_nb);
if (shutdown_task != NULL) {
VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
kthread_stop(shutdown_task);
}
shutdown_task = NULL;
}
/*
* Variables for stuttering, which means to periodically pause and
* restart testing in order to catch bugs that appear when load is
* suddenly applied to or removed from the system.
*/
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
/*
* Block until the stutter interval ends. This must be called periodically
* by all running kthreads that need to be subject to stuttering.
*/
void stutter_wait(const char *title)
{
while (ACCESS_ONCE(stutter_pause_test) ||
(torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
if (stutter_pause_test)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
torture_shutdown_absorb(title);
}
}
EXPORT_SYMBOL_GPL(stutter_wait);
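Putting the pieces together, here is a hypothetical torture-kthread skeleton (example_torture_kthread is an illustrative name, and the body is a sketch rather than part of this patch): each pass does one unit of test work, honors stuttering via stutter_wait(), and parks in torture_kthread_stopping() before returning.
/* Hypothetical kthread body following the usual torture-kthread contract. */
static int example_torture_kthread(void *arg)
{
	VERBOSE_TOROUT_STRING("example_torture_kthread task started");
	do {
		/* ... one iteration of test work would go here ... */
		schedule_timeout_uninterruptible(1);
		stutter_wait("example_torture_kthread");
	} while (!torture_must_stop());
	torture_kthread_stopping("example_torture_kthread");
	return 0;
}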
/*
* Cause the torture test to "stutter", starting and stopping all
* threads periodically.
*/
static int torture_stutter(void *arg)
{
VERBOSE_TOROUT_STRING("torture_stutter task started");
do {
if (!torture_must_stop()) {
schedule_timeout_interruptible(stutter);
ACCESS_ONCE(stutter_pause_test) = 1;
}
if (!torture_must_stop())
schedule_timeout_interruptible(stutter);
ACCESS_ONCE(stutter_pause_test) = 0;
torture_shutdown_absorb("torture_stutter");
} while (!torture_must_stop());
torture_kthread_stopping("torture_stutter");
return 0;
}
/*
* Initialize and kick off the torture_stutter kthread.
*/
int torture_stutter_init(int s)
{
int ret;
stutter = s;
ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
return ret;
}
EXPORT_SYMBOL_GPL(torture_stutter_init);
/*
* Cleanup after the torture_stutter kthread.
*/
static void torture_stutter_cleanup(void)
{
if (!stutter_task)
return;
VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
kthread_stop(stutter_task);
stutter_task = NULL;
}
/*
* Initialize torture module. Please note that this is -not- invoked via
* the usual module_init() mechanism, but rather by an explicit call from
* the client torture module. This call must be paired with a later
* torture_init_end().
*
* The runnable parameter points to a flag that controls whether or not
* the test is currently runnable. If there is no such flag, pass in NULL.
*/
void __init torture_init_begin(char *ttype, bool v, int *runnable)
{
mutex_lock(&fullstop_mutex);
torture_type = ttype;
verbose = v;
torture_runnable = runnable;
fullstop = FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_init_begin);
/*
* Tell the torture module that initialization is complete.
*/
void __init torture_init_end(void)
{
mutex_unlock(&fullstop_mutex);
register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
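Continuing the hypothetical client sketch (the example_* names are illustrative assumptions), a module-init function pairs torture_init_begin() with torture_init_end() and, on failure, unwinds the way rcu_torture_init() does above.
/* Hypothetical client-module initialization sketch. */
static char example_type[] = "example";
static bool example_verbose;
static int example_runnable = 1;
static struct task_struct *example_task;

static int __init example_torture_init(void)
{
	int firsterr;

	torture_init_begin(example_type, example_verbose, &example_runnable);
	firsterr = torture_create_kthread(example_torture_kthread, NULL,
					  example_task);
	torture_init_end();
	if (firsterr) {
		/* Unwind: shared facilities first, then our own kthread. */
		torture_cleanup();
		torture_stop_kthread(example_torture_kthread, example_task);
	}
	return firsterr;
}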
/*
* Clean up torture module. Please note that this is -not- invoked via
* the usual module_exit() mechanism, but rather by an explicit call from
* the client torture module. Returns true if a race with system shutdown
* is detected, otherwise, all kthreads started by functions in this file
* will be shut down.
*
* This must be called before the caller starts shutting down its own
* kthreads.
*/
bool torture_cleanup(void)
{
mutex_lock(&fullstop_mutex);
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
pr_warn("Concurrent rmmod and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
schedule_timeout_uninterruptible(10);
return true;
}
ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
mutex_unlock(&fullstop_mutex);
torture_shutdown_cleanup();
torture_shuffle_cleanup();
torture_stutter_cleanup();
torture_onoff_cleanup();
return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup);
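And the matching hypothetical module-exit path: it asks torture_cleanup() whether a shutdown race was detected, and only afterwards stops its own kthread, as required by the comment above (a real module would register this via module_exit()).
/* Hypothetical client-module exit sketch, pairing with example_torture_init(). */
static void example_torture_cleanup(void)
{
	if (torture_cleanup())
		return;		/* Racing with system shutdown; just leave. */
	torture_stop_kthread(example_torture_kthread, example_task);
}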
/*
* Is it time for the current torture test to stop?
*/
bool torture_must_stop(void)
{
return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);
/*
* Is it time for the current torture test to stop? This is the irq-safe
* version, hence no check for kthread_should_stop().
*/
bool torture_must_stop_irq(void)
{
return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);
/*
* Each kthread must wait for kthread_should_stop() before returning from
* its top-level function, otherwise segfaults ensue. This function
* prints a "stopping" message and waits for kthread_should_stop(), and
* should be called from all torture kthreads immediately prior to
* returning.
*/
void torture_kthread_stopping(char *title)
{
if (verbose)
VERBOSE_TOROUT_STRING(title);
while (!kthread_should_stop()) {
torture_shutdown_absorb(title);
schedule_timeout_uninterruptible(1);
}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);
/*
* Create a generic torture kthread that is immediately runnable. If you
* need the kthread to be stopped so that you can do something to it before
* it starts, you will need to open-code your own.
*/
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
char *f, struct task_struct **tp)
{
int ret = 0;
VERBOSE_TOROUT_STRING(m);
*tp = kthread_run(fn, arg, s);
if (IS_ERR(*tp)) {
ret = PTR_ERR(*tp);
VERBOSE_TOROUT_ERRSTRING(f);
*tp = NULL;
}
torture_shuffle_task_register(*tp);
return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
/*
* Stop a generic kthread, emitting a message.
*/
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
if (*tp == NULL)
return;
VERBOSE_TOROUT_STRING(m);
kthread_stop(*tp);
*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);
......@@ -980,6 +980,21 @@ config DEBUG_LOCKING_API_SELFTESTS
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
config LOCK_TORTURE_TEST
tristate "torture tests for locking"
depends on DEBUG_KERNEL
select TORTURE_TEST
default n
help
This option provides a kernel module that runs torture tests
on kernel locking primitives. The kernel module may be built
after the fact on the running kernel to be tested, if desired.
Say Y here if you want kernel locking-primitive torture tests
to be built into the kernel.
Say M if you want these torture tests to build as a module.
Say N if you are unsure.
endmenu # lock debugging
config TRACE_IRQFLAGS
......@@ -1141,9 +1156,14 @@ config SPARSE_RCU_POINTER
Say N if you are unsure.
config TORTURE_TEST
tristate
default n
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
select TORTURE_TEST
default n
help
This option provides a kernel module that runs torture tests
......
......@@ -96,6 +96,7 @@ identify_qemu () {
echo qemu-system-ppc64
else
echo Cannot figure out what qemu command to use! 1>&2
echo file $1 output: $u
# Usually this will be one of /usr/bin/qemu-system-*
# Use RCU_QEMU_CMD environment variable or appropriate
# argument to top-level script.
......
#!/bin/bash
#
# Analyze a given results directory for locktorture progress.
#
# Usage: sh kvm-recheck-lock.sh resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2014
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
if test -d $i
then
:
else
echo Unreadable results directory: $i
exit 1
fi
configfile=`echo $i | sed -e 's/^.*\///'`
ncs=`grep "Writes: Total:" $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* Total: //' -e 's/ .*$//'`
if test -z "$ncs"
then
echo $configfile
else
title="$configfile ------- $ncs acquisitions/releases"
dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
if test -z "$dur"
then
:
else
ncsps=`awk -v ncs=$ncs -v dur=$dur '
BEGIN { print ncs / dur }' < /dev/null`
title="$title ($ncsps per second)"
fi
echo $title
fi
#!/bin/bash
#
# Analyze a given results directory for rcutorture progress.
#
# Usage: sh kvm-recheck-rcu.sh resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2014
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
if test -d $i
then
:
else
echo Unreadable results directory: $i
exit 1
fi
configfile=`echo $i | sed -e 's/^.*\///'`
ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
if test -z "$ngps"
then
echo $configfile
else
title="$configfile ------- $ngps grace periods"
dur=`sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
if test -z "$dur"
then
:
else
ngpsps=`awk -v ngps=$ngps -v dur=$dur '
BEGIN { print ngps / dur }' < /dev/null`
title="$title ($ngpsps per second)"
fi
echo $title
fi
#!/bin/bash
#
# Given the results directories for previous KVM runs of rcutorture,
# Given the results directories for previous KVM-based torture runs,
# check the build and console output for errors. Given a directory
# containing results directories, this recursively checks them all.
#
......@@ -27,11 +27,18 @@
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
for rd in "$@"
do
firsttime=1
dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u`
for i in $dirs
do
configfile=`echo $i | sed -e 's/^.*\///'`
echo $configfile
if test -n "$firsttime"
then
firsttime=""
resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
head -1 $resdir/log
fi
TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
kvm-recheck-${TORTURE_SUITE}.sh $i
configcheck.sh $i/.config $i/ConfigFragment
parse-build.sh $i/Make.out $configfile
parse-rcutorture.sh $i/console.log $configfile
......
......@@ -6,15 +6,15 @@
# Execute this in the source tree. Do not run it as a background task
# because qemu does not seem to like that much.
#
# Usage: sh kvm-test-1-rcu.sh config builddir resdir minutes qemu-args bootargs
# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
#
# qemu-args defaults to "" -- you will want "-nographic" if running headless.
# bootargs defaults to "root=/dev/sda noapic selinux=0 console=ttyS0"
# "initcall_debug debug rcutorture.stat_interval=15"
# "rcutorture.shutdown_secs=$((minutes * 60))"
# "rcutorture.rcutorture_runnable=1"
# qemu-args defaults to "-nographic", along with arguments specifying the
# number of CPUs and other options generated from
# the underlying CPU architecture.
# boot_args defaults to the value returned by the per_version_boot_params
# shell function.
#
# Anything you specify for either qemu-args or bootargs is appended to
# Anything you specify for either qemu-args or boot_args is appended to
# the default values. The "-smp" value is deduced from the contents of
# the config fragment.
#
......@@ -40,32 +40,34 @@
grace=120
T=/tmp/kvm-test-1-rcu.sh.$$
T=/tmp/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
. $KVM/bin/functions.sh
. $KVPATH/ver_functions.sh
config_template=${1}
config_dir=`echo $config_template | sed -e 's,/[^/]*$,,'`
title=`echo $config_template | sed -e 's/^.*\///'`
builddir=${2}
if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
then
echo "kvm-test-1-rcu.sh :$builddir: Not a writable directory, cannot build into it"
echo "kvm-test-1-run.sh :$builddir: Not a writable directory, cannot build into it"
exit 1
fi
resdir=${3}
if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir"
then
echo "kvm-test-1-rcu.sh :$resdir: Not a writable directory, cannot build into it"
echo "kvm-test-1-run.sh :$resdir: Not a writable directory, cannot store results into it"
exit 1
fi
cp $config_template $resdir/ConfigFragment
echo ' ---' `date`: Starting build
echo ' ---' Kconfig fragment at: $config_template >> $resdir/log
cat << '___EOF___' >> $T
CONFIG_RCU_TORTURE_TEST=y
___EOF___
if test -r "$config_dir/CFcommon"
then
cat < $config_dir/CFcommon >> $T
fi
# Optimizations below this point
# CONFIG_USB=n
# CONFIG_SECURITY=n
......@@ -96,11 +98,23 @@ then
cp $builddir/.config $resdir
cp $builddir/arch/x86/boot/bzImage $resdir
parse-build.sh $resdir/Make.out $title
if test -f $builddir.wait
then
mv $builddir.wait $builddir.ready
fi
else
cp $builddir/Make*.out $resdir
echo Build failed, not running KVM, see $resdir.
if test -f $builddir.wait
then
mv $builddir.wait $builddir.ready
fi
exit 1
fi
while test -f $builddir.ready
do
sleep 1
done
minutes=$4
seconds=$(($minutes * 60))
qemu_args=$5
......@@ -111,9 +125,10 @@ kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
echo ' ---' `date`: Starting kernel
# Determine the appropriate flavor of qemu command.
QEMU="`identify_qemu $builddir/vmlinux.o`"
QEMU="`identify_qemu $builddir/vmlinux`"
# Generate -smp qemu argument.
qemu_args="-nographic $qemu_args"
cpu_count=`configNR_CPUS.sh $config_template`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
......@@ -133,12 +148,8 @@ qemu_append="`identify_qemu_append "$QEMU"`"
# Pull in Kconfig-fragment boot parameters
boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
# Generate CPU-hotplug boot parameters
boot_args="`rcutorture_param_onoff "$boot_args" $builddir/.config`"
# Generate rcu_barrier() boot parameter
boot_args="`rcutorture_param_n_barrier_cbs "$boot_args"`"
# Pull in standard rcutorture boot arguments
boot_args="$boot_args rcutorture.stat_interval=15 rcutorture.shutdown_secs=$seconds rcutorture.rcutorture_runnable=1"
# Generate kernel-version-specific boot parameters
boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
echo $QEMU $qemu_args -m 512 -kernel $builddir/arch/x86/boot/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
if test -n "$RCU_BUILDONLY"
......@@ -188,5 +199,5 @@ then
fi
cp $builddir/console.log $resdir
parse-rcutorture.sh $resdir/console.log $title
parse-${TORTURE_SUITE}torture.sh $resdir/console.log $title
parse-console.sh $resdir/console.log $title
......@@ -30,14 +30,21 @@
scriptname=$0
args="$*"
T=/tmp/kvm.sh.$$
trap 'rm -rf $T' 0
mkdir $T
dur=30
dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
PATH=${KVM}/bin:$PATH; export PATH
builddir="${KVM}/b1"
RCU_INITRD="$KVM/initrd"; export RCU_INITRD
RCU_KMAKE_ARG=""; export RCU_KMAKE_ARG
TORTURE_SUITE=rcu
resdir=""
configs=""
cpus=0
ds=`date +%Y.%m.%d-%H:%M:%S`
kversion=""
......@@ -49,7 +56,9 @@ usage () {
echo " --builddir absolute-pathname"
echo " --buildonly"
echo " --configs \"config-file list\""
echo " --cpus N"
echo " --datestamp string"
echo " --dryrun sched|script"
echo " --duration minutes"
echo " --interactive"
echo " --kmake-arg kernel-make-arguments"
......@@ -58,8 +67,9 @@ usage () {
echo " --no-initrd"
echo " --qemu-args qemu-system-..."
echo " --qemu-cmd qemu-system-..."
echo " --results absolute-pathname"
echo " --relbuilddir relative-pathname"
echo " --results absolute-pathname"
echo " --torture rcu"
exit 1
}
......@@ -85,11 +95,21 @@ do
configs="$2"
shift
;;
--cpus)
checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
cpus=$2
shift
;;
--datestamp)
checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--'
ds=$2
shift
;;
--dryrun)
checkarg --dryrun "sched|script" $# "$2" 'sched\|script' '^--'
dryrun=$2
shift
;;
--duration)
checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
dur=$2
......@@ -138,6 +158,11 @@ do
resdir=$2
shift
;;
--torture)
checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
TORTURE_SUITE=$2
shift
;;
*)
echo Unknown argument $1
usage
......@@ -146,7 +171,7 @@ do
shift
done
CONFIGFRAG=${KVM}/configs; export CONFIGFRAG
CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
KVPATH=${CONFIGFRAG}/$kversion; export KVPATH
if test -z "$configs"
......@@ -157,54 +182,231 @@ fi
if test -z "$resdir"
then
resdir=$KVM/res
if ! test -e $resdir
then
mkdir $resdir || :
fi
else
fi
if test "$dryrun" = ""
then
if ! test -e $resdir
then
mkdir -p "$resdir" || :
fi
fi
mkdir $resdir/$ds
touch $resdir/$ds/log
echo $scriptname $args >> $resdir/$ds/log
mkdir $resdir/$ds
pwd > $resdir/$ds/testid.txt
if test -d .git
then
git status >> $resdir/$ds/testid.txt
git rev-parse HEAD >> $resdir/$ds/testid.txt
fi
builddir=$KVM/b1
if ! test -e $builddir
then
mkdir $builddir || :
# Be noisy only if running the script.
echo Results directory: $resdir/$ds
echo $scriptname $args
touch $resdir/$ds/log
echo $scriptname $args >> $resdir/$ds/log
echo ${TORTURE_SUITE} > $resdir/$ds/TORTURE_SUITE
pwd > $resdir/$ds/testid.txt
if test -d .git
then
git status >> $resdir/$ds/testid.txt
git rev-parse HEAD >> $resdir/$ds/testid.txt
fi
fi
# Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
touch $T/cfgcpu
for CF in $configs
do
# Running TREE01 multiple times creates TREE01, TREE01.2, TREE01.3, ...
rd=$resdir/$ds/$CF
if test -d "${rd}"
if test -f "$CONFIGFRAG/$kversion/$CF"
then
n="`ls -d "${rd}"* | grep '\.[0-9]\+$' |
sed -e 's/^.*\.\([0-9]\+\)/\1/' |
sort -k1n | tail -1`"
if test -z "$n"
then
rd="${rd}.2"
else
n="`expr $n + 1`"
rd="${rd}.${n}"
fi
echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu
else
echo "The --configs file $CF does not exist, terminating."
exit 1
fi
mkdir "${rd}"
echo Results directory: $rd
kvm-test-1-rcu.sh $CONFIGFRAG/$kversion/$CF $builddir $rd $dur "-nographic $RCU_QEMU_ARG" "rcutorture.test_no_idle_hz=1 rcutorture.verbose=1 $RCU_BOOTARGS"
done
sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
# Use a greedy bin-packing algorithm, sorting the list accordingly.
awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
BEGIN {
njobs = 0;
}
{
# Read file of tests and corresponding required numbers of CPUs.
cf[njobs] = $1;
cpus[njobs] = $2;
njobs++;
}
END {
alldone = 0;
batch = 0;
nc = -1;
# Each pass through the following loop creates one test batch
# that can be executed concurrently given ncpus. Note that a
# given test that requires more than the available CPUs will run in
# its own batch. Such tests just have to make do with what
# is available.
while (nc != ncpus) {
batch++;
nc = ncpus;
# Each pass through the following loop considers one
# test for inclusion in the current batch.
for (i = 0; i < njobs; i++) {
if (done[i])
continue; # Already part of a batch.
if (nc >= cpus[i] || nc == ncpus) {
# This test fits into the current batch.
done[i] = batch;
nc -= cpus[i];
if (nc <= 0)
break; # Too-big test in its own batch.
}
}
}
# Dump out the tests in batch order.
for (b = 1; b <= batch; b++)
for (i = 0; i < njobs; i++)
if (done[i] == b)
print cf[i], cpus[i];
}'
# Generate a script to execute the tests in appropriate batches.
cat << ___EOF___ > $T/script
TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
___EOF___
awk < $T/cfgcpu.pack \
-v CONFIGDIR="$CONFIGFRAG/$kversion/" \
-v KVM="$KVM" \
-v ncpus=$cpus \
-v rd=$resdir/$ds/ \
-v dur=$dur \
-v RCU_QEMU_ARG=$RCU_QEMU_ARG \
-v RCU_BOOTARGS=$RCU_BOOTARGS \
'BEGIN {
i = 0;
}
{
cf[i] = $1;
cpus[i] = $2;
i++;
}
# Dump out the scripting required to run one test batch.
function dump(first, pastlast)
{
print "echo ----Start batch: `date`";
print "echo ----Start batch: `date` >> " rd "/log";
jn=1
for (j = first; j < pastlast; j++) {
builddir=KVM "/b" jn
cpusr[jn] = cpus[j];
if (cfrep[cf[j]] == "") {
cfr[jn] = cf[j];
cfrep[cf[j]] = 1;
} else {
cfrep[cf[j]]++;
cfr[jn] = cf[j] "." cfrep[cf[j]];
}
if (cpusr[jn] > ncpus && ncpus != 0)
ovf = "(!)";
else
ovf = "";
print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
print "rm -f " builddir ".*";
print "touch " builddir ".wait";
print "mkdir " builddir " > /dev/null 2>&1 || :";
print "mkdir " rd cfr[jn] " || :";
print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" RCU_QEMU_ARG "\" \"" RCU_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
print "while test -f " builddir ".wait"
print "do"
print "\tsleep 1"
print "done"
print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
jn++;
}
for (j = 1; j < jn; j++) {
builddir=KVM "/b" j
print "rm -f " builddir ".ready"
print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
}
print "wait"
print "echo ---- All kernel runs complete. `date`";
print "echo ---- All kernel runs complete. `date` >> " rd "/log";
for (j = 1; j < jn; j++) {
builddir=KVM "/b" j
print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
}
}
END {
njobs = i;
nc = ncpus;
first = 0;
# Each pass through the following loop considers one test.
for (i = 0; i < njobs; i++) {
if (ncpus == 0) {
# Sequential test specified, each test its own batch.
dump(i, i + 1);
first = i;
} else if (nc < cpus[i] && i != 0) {
# Out of CPUs, dump out a batch.
dump(first, i);
first = i;
nc = ncpus;
}
# Account for the CPUs needed by the current test.
nc -= cpus[i];
}
# Dump the last batch.
if (ncpus != 0)
dump(first, i);
}' >> $T/script
if test "$dryrun" = script
then
# Dump out the script, but define the environment variables that
# it needs to run standalone.
echo CONFIGFRAG="$CONFIGFRAG; export CONFIGFRAG"
echo KVM="$KVM; export KVM"
echo KVPATH="$KVPATH; export KVPATH"
echo PATH="$PATH; export PATH"
echo RCU_BUILDONLY="$RCU_BUILDONLY; export RCU_BUILDONLY"
echo RCU_INITRD="$RCU_INITRD; export RCU_INITRD"
echo RCU_KMAKE_ARG="$RCU_KMAKE_ARG; export RCU_KMAKE_ARG"
echo RCU_QEMU_CMD="$RCU_QEMU_CMD; export RCU_QEMU_CMD"
echo RCU_QEMU_INTERACTIVE="$RCU_QEMU_INTERACTIVE; export RCU_QEMU_INTERACTIVE"
echo RCU_QEMU_MAC="$RCU_QEMU_MAC; export RCU_QEMU_MAC"
echo "mkdir -p "$resdir" || :"
echo "mkdir $resdir/$ds"
cat $T/script
exit 0
elif test "$dryrun" = sched
then
# Extract the test run schedule from the script.
egrep 'Start batch|Starting build\.' $T/script |
sed -e 's/:.*$//' -e 's/^echo //'
exit 0
else
# Not a dryrun, so run the script.
sh $T/script
fi
# Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier
echo
echo
echo " --- `date` Test summary:"
echo Results directory: $resdir/$ds
kvm-recheck.sh $resdir/$ds
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
CONFIG_LOCK_TORTURE_TEST=y
CONFIG_PRINTK_TIME=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
#!/bin/bash
#
# Kernel-version-dependent shell functions for the rest of the scripts.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2014
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
# locktorture_param_onoff bootparam-string config-file
#
# Adds onoff locktorture module parameters to kernels having it.
locktorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding locktorture onoff. 1>&2
echo locktorture.onoff_interval=3 locktorture.onoff_holdoff=30
fi
}
# per_version_boot_params bootparam-string config-file seconds
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
locktorture.locktorture_runnable=1 \
locktorture.verbose=1
}
CONFIG_RCU_TRACE=n
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
CONFIG_RCU_TORTURE_TEST=y
CONFIG_PRINTK_TIME=y
CONFIG_RCU_TRACE=n
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
CONFIG_PRINTK_TIME=y
......@@ -5,4 +5,3 @@ CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
CONFIG_PRINTK_TIME=y
......@@ -10,4 +10,3 @@ CONFIG_RCU_TRACE=n
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PREEMPT_COUNT=n
CONFIG_PRINTK_TIME=y
......@@ -10,4 +10,3 @@ CONFIG_RCU_TRACE=y
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PREEMPT_COUNT=y
CONFIG_PRINTK_TIME=y
......@@ -20,4 +20,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -7,7 +7,7 @@ CONFIG_PREEMPT=y
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=n
CONFIG_HOTPLUG_CPU=n
CONFIG_SUSPEND=n
......@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=y
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -20,4 +20,3 @@ CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=y
CONFIG_RCU_BOOST_PRIO=2
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -22,4 +22,3 @@ CONFIG_PROVE_RCU_DELAY=n
CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_RCU_CPU_STALL_VERBOSE=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -22,4 +22,3 @@ CONFIG_PROVE_RCU_DELAY=y
CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -23,4 +23,3 @@ CONFIG_PROVE_RCU_DELAY=n
CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_PRINTK_TIME=y
......@@ -21,4 +21,3 @@ CONFIG_PROVE_RCU_DELAY=n
CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -18,4 +18,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
CONFIG_RCU_CPU_STALL_VERBOSE=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_PRINTK_TIME=y
......@@ -20,16 +20,14 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
# rcutorture_param_n_barrier_cbs bootparam-string
#
# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
rcutorture_param_n_barrier_cbs () {
echo $1
}
# rcutorture_param_onoff bootparam-string config-file
#
# Adds onoff rcutorture module parameters to kernels having it.
rcutorture_param_onoff () {
echo $1
# per_version_boot_params bootparam-string config-file seconds
#
# Adds per-version torture-module parameters to kernels supporting them.
# Which old kernels do not.
per_version_boot_params () {
echo rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
rcutorture.rcutorture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
......@@ -20,18 +20,6 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
# rcutorture_param_n_barrier_cbs bootparam-string
#
# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
rcutorture_param_n_barrier_cbs () {
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
then
echo $1
else
echo $1 rcutorture.n_barrier_cbs=4
fi
}
# rcutorture_param_onoff bootparam-string config-file
#
# Adds onoff rcutorture module parameters to kernels having it.
......@@ -39,8 +27,18 @@ rcutorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
else
echo $1
echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
# per_version_boot_params bootparam-string config-file seconds
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 `rcutorture_param_onoff "$1" "$2"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
rcutorture.rcutorture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
......@@ -26,9 +26,9 @@
rcutorture_param_n_barrier_cbs () {
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
then
echo $1
:
else
echo $1 rcutorture.n_barrier_cbs=4
echo rcutorture.n_barrier_cbs=4
fi
}
......@@ -38,9 +38,20 @@ rcutorture_param_n_barrier_cbs () {
rcutorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff.
echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
else
echo $1
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
# per_version_boot_params bootparam-string config-file seconds
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 `rcutorture_param_onoff "$1" "$2"` \
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
rcutorture.rcutorture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
......@@ -24,7 +24,12 @@
#
# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
rcutorture_param_n_barrier_cbs () {
echo $1
if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
then
:
else
echo rcutorture.n_barrier_cbs=4
fi
}
# rcutorture_param_onoff bootparam-string config-file
......@@ -33,9 +38,20 @@ rcutorture_param_n_barrier_cbs () {
rcutorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff.
echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
else
echo $1
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
fi
}
# per_version_boot_params bootparam-string config-file seconds
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
echo $1 `rcutorture_param_onoff "$1" "$2"` \
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
rcutorture.rcutorture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}