Commit 40c1278a authored by Paul E. McKenney

rcutorture: Allow rcutorture without RCU Tasks Trace

Unless a kernel builds rcutorture, whether built-in or as a module, that
kernel is also built with CONFIG_TASKS_TRACE_RCU, whether anything else
needs Tasks Trace RCU or not.  This unnecessarily increases kernel size.
This commit therefore decouples the presence of rcutorture from the
presence of RCU Tasks Trace.

However, there is a need to select CONFIG_TASKS_TRACE_RCU for
testing purposes.  Except that casual users must not be bothered with
questions -- for them, this needs to be fully automated.  There is thus
a CONFIG_FORCE_TASKS_TRACE_RCU that selects CONFIG_TASKS_TRACE_RCU,
is user-selectable, but which depends on CONFIG_RCU_EXPERT.

[ paulmck: Apply kernel test robot feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 835f14ed
...@@ -95,15 +95,23 @@ config TASKS_RUDE_RCU ...@@ -95,15 +95,23 @@ config TASKS_RUDE_RCU
switches on all online CPUs, including idle ones, so use switches on all online CPUs, including idle ones, so use
with caution. with caution.
config TASKS_TRACE_RCU config FORCE_TASKS_TRACE_RCU
def_bool 0 bool "Force selection of Tasks Trace RCU"
select IRQ_WORK depends on RCU_EXPERT
select TASKS_TRACE_RCU
default n
help help
This option enables a task-based RCU implementation that uses This option enables a task-based RCU implementation that uses
explicit rcu_read_lock_trace() read-side markers, and allows explicit rcu_read_lock_trace() read-side markers, and allows
these readers to appear in the idle loop as well as on the CPU these readers to appear in the idle loop as well as on the
hotplug code paths. It can force IPIs on online CPUs, including CPU hotplug code paths. It can force IPIs on online CPUs,
idle ones, so use with caution. including idle ones, so use with caution. Not for manual
selection in most cases.
config TASKS_TRACE_RCU
bool
default n
select IRQ_WORK
config RCU_STALL_COMMON config RCU_STALL_COMMON
def_bool TREE_RCU def_bool TREE_RCU
...@@ -227,7 +235,7 @@ config RCU_NOCB_CPU ...@@ -227,7 +235,7 @@ config RCU_NOCB_CPU
config TASKS_TRACE_RCU_READ_MB config TASKS_TRACE_RCU_READ_MB
bool "Tasks Trace RCU readers use memory barriers in user and idle" bool "Tasks Trace RCU readers use memory barriers in user and idle"
depends on RCU_EXPERT depends on RCU_EXPERT && TASKS_TRACE_RCU
default PREEMPT_RT || NR_CPUS < 8 default PREEMPT_RT || NR_CPUS < 8
help help
Use this option to further reduce the number of IPIs sent Use this option to further reduce the number of IPIs sent
......
...@@ -49,7 +49,6 @@ config RCU_TORTURE_TEST ...@@ -49,7 +49,6 @@ config RCU_TORTURE_TEST
select SRCU select SRCU
select TASKS_RCU select TASKS_RCU
select TASKS_RUDE_RCU select TASKS_RUDE_RCU
select TASKS_TRACE_RCU
default n default n
help help
This option provides a kernel module that runs torture tests This option provides a kernel module that runs torture tests
......
...@@ -737,6 +737,48 @@ static struct rcu_torture_ops busted_srcud_ops = { ...@@ -737,6 +737,48 @@ static struct rcu_torture_ops busted_srcud_ops = {
.name = "busted_srcud" .name = "busted_srcud"
}; };
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */
/*
 * Trivial-flavor grace-period wait: migrate the current task onto each
 * online CPU in turn.  Under CONFIG_PREEMPT=n, having been scheduled on a
 * CPU implies any pre-existing preempt-disabled reader there has finished.
 */
static void synchronize_rcu_trivial(void)
{
/* CPU currently being visited. */
int cpu;
for_each_online_cpu(cpu) {
/* Pin this task to @cpu; completing the migration is the quiescent-state wait. */
rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
/* NOTE(review): assumes setaffinity leaves us running on @cpu — diagnostic only. */
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
}
}
/*
 * Trivial-flavor read-side entry: disabling preemption is the entire
 * critical-section marker.  Always returns reader index 0, which the
 * matching unlock ignores.
 */
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
preempt_disable();
return 0;
}
/*
 * Trivial-flavor read-side exit: re-enable preemption.  @idx is the value
 * returned by the matching lock and is unused here.
 */
static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
preempt_enable();
}
/* Torture-ops table wiring the trivial flavor into the rcutorture core. */
static struct rcu_torture_ops trivial_ops = {
.ttype = RCU_TRIVIAL_FLAVOR,
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock_trivial,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
.readlock_held = torture_readlock_not_held,
.get_gp_seq = rcu_no_completed, /* no grace-period sequence counter for this flavor. */
.sync = synchronize_rcu_trivial,
.exp_sync = synchronize_rcu_trivial, /* no expedited variant; reuse the normal one. */
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
.name = "trivial"
};
/* /*
* Definitions for RCU-tasks torture testing. * Definitions for RCU-tasks torture testing.
*/ */
...@@ -780,48 +822,6 @@ static struct rcu_torture_ops tasks_ops = { ...@@ -780,48 +822,6 @@ static struct rcu_torture_ops tasks_ops = {
.name = "tasks" .name = "tasks"
}; };
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */
/*
 * Trivial-flavor grace-period wait: migrate the current task onto each
 * online CPU in turn.  Under CONFIG_PREEMPT=n, having been scheduled on a
 * CPU implies any pre-existing preempt-disabled reader there has finished.
 */
static void synchronize_rcu_trivial(void)
{
/* CPU currently being visited. */
int cpu;
for_each_online_cpu(cpu) {
/* Pin this task to @cpu; completing the migration is the quiescent-state wait. */
rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
/* NOTE(review): assumes setaffinity leaves us running on @cpu — diagnostic only. */
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
}
}
/*
 * Trivial-flavor read-side entry: disabling preemption is the entire
 * critical-section marker.  Always returns reader index 0, which the
 * matching unlock ignores.
 */
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
preempt_disable();
return 0;
}
/*
 * Trivial-flavor read-side exit: re-enable preemption.  @idx is the value
 * returned by the matching lock and is unused here.
 */
static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
preempt_enable();
}
/* Torture-ops table wiring the trivial flavor into the rcutorture core. */
static struct rcu_torture_ops trivial_ops = {
.ttype = RCU_TRIVIAL_FLAVOR,
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock_trivial,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
.readlock_held = torture_readlock_not_held,
.get_gp_seq = rcu_no_completed, /* no grace-period sequence counter for this flavor. */
.sync = synchronize_rcu_trivial,
.exp_sync = synchronize_rcu_trivial, /* no expedited variant; reuse the normal one. */
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
.name = "trivial"
};
/* /*
* Definitions for rude RCU-tasks torture testing. * Definitions for rude RCU-tasks torture testing.
*/ */
...@@ -851,6 +851,8 @@ static struct rcu_torture_ops tasks_rude_ops = { ...@@ -851,6 +851,8 @@ static struct rcu_torture_ops tasks_rude_ops = {
.name = "tasks-rude" .name = "tasks-rude"
}; };
#ifdef CONFIG_TASKS_TRACE_RCU
/* /*
* Definitions for tracing RCU-tasks torture testing. * Definitions for tracing RCU-tasks torture testing.
*/ */
...@@ -893,6 +895,15 @@ static struct rcu_torture_ops tasks_tracing_ops = { ...@@ -893,6 +895,15 @@ static struct rcu_torture_ops tasks_tracing_ops = {
.name = "tasks-tracing" .name = "tasks-tracing"
}; };
#define TASKS_TRACING_OPS &tasks_tracing_ops,
#else // #ifdef CONFIG_TASKS_TRACE_RCU
#define TASKS_TRACING_OPS
#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{ {
if (!cur_ops->gp_diff) if (!cur_ops->gp_diff)
...@@ -3096,9 +3107,9 @@ rcu_torture_init(void) ...@@ -3096,9 +3107,9 @@ rcu_torture_init(void)
int flags = 0; int flags = 0;
unsigned long gp_seq = 0; unsigned long gp_seq = 0;
static struct rcu_torture_ops *torture_ops[] = { static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
&busted_srcud_ops, &tasks_ops, &tasks_rude_ops, &tasks_ops, &tasks_rude_ops, TASKS_TRACING_OPS
&tasks_tracing_ops, &trivial_ops, &trivial_ops,
}; };
if (!torture_init_begin(torture_type, verbose)) if (!torture_init_begin(torture_type, verbose))
......
...@@ -7,5 +7,7 @@ CONFIG_PREEMPT=n ...@@ -7,5 +7,7 @@ CONFIG_PREEMPT=n
CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_PROVE_LOCKING=n CONFIG_PROVE_LOCKING=n
#CHECK#CONFIG_PROVE_RCU=n #CHECK#CONFIG_PROVE_RCU=n
CONFIG_FORCE_TASKS_TRACE_RCU=y
#CHECK#CONFIG_TASKS_TRACE_RCU=y
CONFIG_TASKS_TRACE_RCU_READ_MB=y CONFIG_TASKS_TRACE_RCU_READ_MB=y
CONFIG_RCU_EXPERT=y CONFIG_RCU_EXPERT=y
...@@ -7,5 +7,7 @@ CONFIG_PREEMPT=y ...@@ -7,5 +7,7 @@ CONFIG_PREEMPT=y
CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_PROVE_LOCKING=y CONFIG_PROVE_LOCKING=y
#CHECK#CONFIG_PROVE_RCU=y #CHECK#CONFIG_PROVE_RCU=y
CONFIG_FORCE_TASKS_TRACE_RCU=y
#CHECK#CONFIG_TASKS_TRACE_RCU=y
CONFIG_TASKS_TRACE_RCU_READ_MB=n CONFIG_TASKS_TRACE_RCU_READ_MB=n
CONFIG_RCU_EXPERT=y CONFIG_RCU_EXPERT=y
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment