Commit 3808dc9f authored by Paul E. McKenney

rcutorture: Abstract torture_shuffle()

The torture_shuffle() function forces each CPU in turn to go idle
periodically in order to check for problems interacting with per-CPU
variables and with dyntick-idle mode.  Because this sort of debugging
is not specific to RCU, this commit abstracts that functionality.
This in turn requires abstracting some additional infrastructure.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent f67a3356
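
The new hooks form a small three-call API: a client registers each of its torture kthreads with torture_shuffle_task_register(), starts the shuffler with torture_shuffle_init(), and tears everything down with torture_shuffle_cleanup(). As a rough illustration only (not part of this commit; the module and names such as my_torture_loop are hypothetical, and error handling is trimmed), a client might use the hooks like this:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/torture.h>

static struct task_struct *my_task;

/* A trivial torture kthread that just sleeps in a loop. */
static int my_torture_loop(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int __init my_torture_init(void)
{
	int firsterr;

	my_task = kthread_run(my_torture_loop, NULL, "my_torture_loop");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	/* Opt this kthread in to the periodic CPU shuffling. */
	torture_shuffle_task_register(my_task);

	/* Start the shuffler; the interval is in jiffies (3 s here). */
	firsterr = torture_shuffle_init(3 * HZ);
	if (firsterr)
		return firsterr;
	return 0;
}

static void my_torture_cleanup(void)
{
	/* Stop shuffling before stopping the shuffled tasks. */
	torture_shuffle_cleanup();
	kthread_stop(my_task);
}

As the "/* Must be first task cleaned up. */" comment in rcu_torture_cleanup() below indicates, the shuffler must be shut down before the tasks it shuffles are stopped, since the shuffle list holds bare task_struct pointers.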
@@ -69,6 +69,11 @@ struct torture_random_state {
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
 unsigned long torture_random(struct torture_random_state *trsp);
 
+/* Task shuffler, which causes CPUs to occasionally go idle. */
+void torture_shuffle_task_register(struct task_struct *tp);
+int torture_shuffle_init(long shuffint);
+void torture_shuffle_cleanup(void);
+
 /* Shutdown task absorption, for when the tasks cannot safely be killed. */
 void torture_shutdown_absorb(const char *title);
...
@@ -106,7 +106,6 @@ static struct task_struct *writer_task;
 static struct task_struct **fakewriter_tasks;
 static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
-static struct task_struct *shuffler_task;
 static struct task_struct *stutter_task;
 static struct task_struct *fqs_task;
 static struct task_struct *boost_tasks[NR_CPUS];
@@ -161,7 +160,6 @@ static int max_online;
 static long n_barrier_attempts;
 static long n_barrier_successes;
 static struct list_head rcu_torture_removed;
-static cpumask_var_t shuffle_tmp_mask;
 static int stutter_pause_test;
@@ -1080,90 +1078,6 @@ rcu_torture_stats(void *arg)
        return 0;
 }
 
-static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */
-
-/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
- * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
- */
-static void rcu_torture_shuffle_tasks(void)
-{
-        int i;
-
-        cpumask_setall(shuffle_tmp_mask);
-        get_online_cpus();
-
-        /* No point in shuffling if there is only one online CPU (ex: UP) */
-        if (num_online_cpus() == 1) {
-                put_online_cpus();
-                return;
-        }
-
-        if (rcu_idle_cpu != -1)
-                cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
-
-        set_cpus_allowed_ptr(current, shuffle_tmp_mask);
-
-        if (reader_tasks) {
-                for (i = 0; i < nrealreaders; i++)
-                        if (reader_tasks[i])
-                                set_cpus_allowed_ptr(reader_tasks[i],
-                                                     shuffle_tmp_mask);
-        }
-        if (fakewriter_tasks) {
-                for (i = 0; i < nfakewriters; i++)
-                        if (fakewriter_tasks[i])
-                                set_cpus_allowed_ptr(fakewriter_tasks[i],
-                                                     shuffle_tmp_mask);
-        }
-        if (writer_task)
-                set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-        if (stats_task)
-                set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
-        if (stutter_task)
-                set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
-        if (fqs_task)
-                set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
-        if (shutdown_task)
-                set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
-#ifdef CONFIG_HOTPLUG_CPU
-        if (onoff_task)
-                set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-        if (stall_task)
-                set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
-        if (barrier_cbs_tasks)
-                for (i = 0; i < n_barrier_cbs; i++)
-                        if (barrier_cbs_tasks[i])
-                                set_cpus_allowed_ptr(barrier_cbs_tasks[i],
-                                                     shuffle_tmp_mask);
-        if (barrier_task)
-                set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
-
-        if (rcu_idle_cpu == -1)
-                rcu_idle_cpu = num_online_cpus() - 1;
-        else
-                rcu_idle_cpu--;
-
-        put_online_cpus();
-}
-
-/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
- * system to become idle at a time and cut off its timer ticks. This is meant
- * to test the support for such tickless idle CPU in RCU.
- */
-static int
-rcu_torture_shuffle(void *arg)
-{
-        VERBOSE_TOROUT_STRING("rcu_torture_shuffle task started");
-        do {
-                schedule_timeout_interruptible(shuffle_interval * HZ);
-                rcu_torture_shuffle_tasks();
-                torture_shutdown_absorb("rcu_torture_shuffle");
-        } while (!kthread_should_stop());
-        VERBOSE_TOROUT_STRING("rcu_torture_shuffle task stopping");
-        return 0;
-}
-
 /* Cause the rcutorture test to "stutter", starting and stopping all
  * threads periodically.
  */
@@ -1397,6 +1311,7 @@ rcu_torture_onoff_init(void)
                onoff_task = NULL;
                return ret;
        }
+       torture_shuffle_task_register(onoff_task);
        return 0;
 }
@@ -1468,6 +1383,7 @@ static int __init rcu_torture_stall_init(void)
                stall_task = NULL;
                return ret;
        }
+       torture_shuffle_task_register(stall_task);
        return 0;
 }
@@ -1594,6 +1510,7 @@ static int rcu_torture_barrier_init(void)
                        barrier_cbs_tasks[i] = NULL;
                        return ret;
                }
+               torture_shuffle_task_register(barrier_cbs_tasks[i]);
        }
        barrier_task = kthread_run(rcu_torture_barrier, NULL,
                                   "rcu_torture_barrier");
@@ -1602,6 +1519,7 @@ static int rcu_torture_barrier_init(void)
                VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier");
                barrier_task = NULL;
        }
+       torture_shuffle_task_register(barrier_task);
        return 0;
 }
@@ -1674,6 +1592,8 @@ rcu_torture_cleanup(void)
        fullstop = FULLSTOP_RMMOD;
        mutex_unlock(&fullstop_mutex);
        unregister_reboot_notifier(&rcutorture_shutdown_nb);
+
+       torture_shuffle_cleanup();      /* Must be first task cleaned up. */
        rcu_torture_barrier_cleanup();
        rcu_torture_stall_cleanup();
        if (stutter_task) {
@@ -1681,12 +1601,6 @@ rcu_torture_cleanup(void)
                kthread_stop(stutter_task);
        }
        stutter_task = NULL;
-       if (shuffler_task) {
-               VERBOSE_TOROUT_STRING("Stopping rcu_torture_shuffle task");
-               kthread_stop(shuffler_task);
-               free_cpumask_var(shuffle_tmp_mask);
-       }
-       shuffler_task = NULL;
        if (writer_task) {
                VERBOSE_TOROUT_STRING("Stopping rcu_torture_writer task");
@@ -1904,6 +1818,7 @@ rcu_torture_init(void)
                writer_task = NULL;
                goto unwind;
        }
+       torture_shuffle_task_register(writer_task);
        wake_up_process(writer_task);
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
@@ -1922,6 +1837,7 @@ rcu_torture_init(void)
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(fakewriter_tasks[i]);
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
@@ -1940,6 +1856,7 @@ rcu_torture_init(void)
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(reader_tasks[i]);
        }
        if (stat_interval > 0) {
                VERBOSE_TOROUT_STRING("Creating rcu_torture_stats task");
@@ -1951,27 +1868,13 @@ rcu_torture_init(void)
                        stats_task = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(stats_task);
        }
        if (test_no_idle_hz) {
-               rcu_idle_cpu = num_online_cpus() - 1;
-
-               if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
-                       firsterr = -ENOMEM;
-                       VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
-                       goto unwind;
-               }
-
-               /* Create the shuffler thread */
-               shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
-                                           "rcu_torture_shuffle");
-               if (IS_ERR(shuffler_task)) {
-                       free_cpumask_var(shuffle_tmp_mask);
-                       firsterr = PTR_ERR(shuffler_task);
-                       VERBOSE_TOROUT_ERRSTRING("Failed to create shuffler");
-                       shuffler_task = NULL;
-                       goto unwind;
-               }
+               firsterr = torture_shuffle_init(shuffle_interval * HZ);
+               if (firsterr)
+                       goto unwind;
        }
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
@@ -1984,6 +1887,7 @@ rcu_torture_init(void)
                        stutter_task = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(stutter_task);
        }
        if (fqs_duration < 0)
                fqs_duration = 0;
@@ -1997,6 +1901,7 @@ rcu_torture_init(void)
                        fqs_task = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(fqs_task);
        }
        if (test_boost_interval < 1)
                test_boost_interval = 1;
@@ -2027,6 +1932,7 @@ rcu_torture_init(void)
                        shutdown_task = NULL;
                        goto unwind;
                }
+               torture_shuffle_task_register(shutdown_task);
                wake_up_process(shutdown_task);
        }
        i = rcu_torture_onoff_init();
...
@@ -75,6 +75,157 @@ torture_random(struct torture_random_state *trsp)
 }
 EXPORT_SYMBOL_GPL(torture_random);
 
+/*
+ * Variables for shuffling.  The idea is to ensure that each CPU stays
+ * idle for an extended period to test interactions with dyntick idle,
+ * as well as interactions with any per-CPU variables.
+ */
+struct shuffle_task {
+       struct list_head st_l;
+       struct task_struct *st_t;
+};
+
+static long shuffle_interval;   /* In jiffies. */
+static struct task_struct *shuffler_task;
+static cpumask_var_t shuffle_tmp_mask;
+static int shuffle_idle_cpu;    /* Force all torture tasks off this CPU */
+static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
+static DEFINE_MUTEX(shuffle_task_mutex);
+
+/*
+ * Register a task to be shuffled.  If there is no memory, just splat
+ * and don't bother registering.
+ */
+void torture_shuffle_task_register(struct task_struct *tp)
+{
+       struct shuffle_task *stp;
+
+       if (WARN_ON_ONCE(tp == NULL))
+               return;
+       stp = kmalloc(sizeof(*stp), GFP_KERNEL);
+       if (WARN_ON_ONCE(stp == NULL))
+               return;
+       stp->st_t = tp;
+       mutex_lock(&shuffle_task_mutex);
+       list_add(&stp->st_l, &shuffle_task_list);
+       mutex_unlock(&shuffle_task_mutex);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
+
+/*
+ * Unregister all tasks, for example, at the end of the torture run.
+ */
+static void torture_shuffle_task_unregister_all(void)
+{
+       struct shuffle_task *stp;
+       struct shuffle_task *p;
+
+       mutex_lock(&shuffle_task_mutex);
+       list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
+               list_del(&stp->st_l);
+               kfree(stp);
+       }
+       mutex_unlock(&shuffle_task_mutex);
+}
+
+/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
+ * A special case is when shuffle_idle_cpu = -1, in which case we allow
+ * the tasks to run on all CPUs.
+ */
+static void torture_shuffle_tasks(void)
+{
+       struct shuffle_task *stp;
+
+       cpumask_setall(shuffle_tmp_mask);
+       get_online_cpus();
+
+       /* No point in shuffling if there is only one online CPU (ex: UP) */
+       if (num_online_cpus() == 1) {
+               put_online_cpus();
+               return;
+       }
+
+       /* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
+       shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
+       if (shuffle_idle_cpu >= nr_cpu_ids)
+               shuffle_idle_cpu = -1;
+       if (shuffle_idle_cpu != -1) {
+               cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
+               if (cpumask_empty(shuffle_tmp_mask)) {
+                       put_online_cpus();
+                       return;
+               }
+       }
+
+       mutex_lock(&shuffle_task_mutex);
+       list_for_each_entry(stp, &shuffle_task_list, st_l)
+               set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+       mutex_unlock(&shuffle_task_mutex);
+
+       put_online_cpus();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks.  This is meant
+ * to test the support for such tickless idle CPU in RCU.
+ */
+static int torture_shuffle(void *arg)
+{
+       VERBOSE_TOROUT_STRING("torture_shuffle task started");
+       do {
+               schedule_timeout_interruptible(shuffle_interval);
+               torture_shuffle_tasks();
+               torture_shutdown_absorb("torture_shuffle");
+       } while (!torture_must_stop());
+       VERBOSE_TOROUT_STRING("torture_shuffle task stopping");
+       return 0;
+}
+
+/*
+ * Start the shuffler, with shuffint in jiffies.
+ */
+int torture_shuffle_init(long shuffint)
+{
+       int ret;
+
+       shuffle_interval = shuffint;
+       shuffle_idle_cpu = -1;
+
+       if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+               VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
+               return -ENOMEM;
+       }
+
+       /* Create the shuffler thread */
+       shuffler_task = kthread_run(torture_shuffle, NULL, "torture_shuffle");
+       if (IS_ERR(shuffler_task)) {
+               ret = PTR_ERR(shuffler_task);
+               free_cpumask_var(shuffle_tmp_mask);
+               VERBOSE_TOROUT_ERRSTRING("Failed to create shuffler");
+               shuffler_task = NULL;
+               return ret;
+       }
+       torture_shuffle_task_register(shuffler_task);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_init);
+
+/*
+ * Stop the shuffling.
+ */
+void torture_shuffle_cleanup(void)
+{
+       torture_shuffle_task_unregister_all();
+       if (shuffler_task) {
+               VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
+               kthread_stop(shuffler_task);
+               free_cpumask_var(shuffle_tmp_mask);
+       }
+       shuffler_task = NULL;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
+
 /*
  * Absorb kthreads into a kernel function that won't return, so that
  * they won't ever access module text or data again.
...
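
The rotation in torture_shuffle_tasks() leans on the cpumask_next() convention: cpumask_next(n, mask) returns the first set bit greater than n, so starting from -1 yields the first CPU, and running past the last CPU returns nr_cpu_ids, which the code maps back to -1 so that every cycle includes one interval during which no CPU is forced idle. The following userspace sketch (not from the commit; a plain bitmask stands in for the cpumask, and NR_CPU_IDS is an assumed 4-CPU system) traces that cycle:

#include <stdio.h>

#define NR_CPU_IDS 4    /* Pretend this is a 4-CPU system. */

/* Minimal stand-in for cpumask_next() on a bitmask. */
static int mask_next(int n, unsigned int mask)
{
        int cpu;

        for (cpu = n + 1; cpu < NR_CPU_IDS; cpu++)
                if (mask & (1u << cpu))
                        return cpu;
        return NR_CPU_IDS;      /* Overflow, like returning nr_cpu_ids. */
}

int main(void)
{
        unsigned int all_cpus = (1u << NR_CPU_IDS) - 1;
        int shuffle_idle_cpu = -1;
        int i;

        /* Ten shuffle intervals: CPUs 0..3 are idled in turn, then one
         * interval with no CPU idled (-1), and the cycle repeats. */
        for (i = 0; i < 10; i++) {
                shuffle_idle_cpu = mask_next(shuffle_idle_cpu, all_cpus);
                if (shuffle_idle_cpu >= NR_CPU_IDS)
                        shuffle_idle_cpu = -1;
                printf("interval %d: idle CPU %d\n", i, shuffle_idle_cpu);
        }
        return 0;
}

Each CPU thus gets a turn as the forced-idle CPU, with one rest interval per cycle during which the registered tasks may run anywhere.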