Commit 42528795 authored by Paul E. McKenney

Merge branches 'doc.2015.02.26a', 'earlycb.2015.03.03a', 'fixes.2015.03.03a', 'gpexp.2015.02.26a', 'hotplug.2015.03.20a', 'sysidle.2015.02.26b' and 'tiny.2015.02.26a' into HEAD

doc.2015.02.26a:  Documentation changes
earlycb.2015.03.03a:  Permit early-boot RCU callbacks
fixes.2015.03.03a:  Miscellaneous fixes
gpexp.2015.02.26a:  In-kernel expediting of normal grace periods
hotplug.2015.03.20a:  CPU hotplug fixes
sysidle.2015.02.26b:  NO_HZ_FULL_SYSIDLE fixes
tiny.2015.02.26a:  TINY_RCU fixes
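The hotplug.2015.03.20a topic replaces the per-architecture completion-based "CPU died" handshake with common helpers in kernel/smpboot.c (cpu_wait_death(), cpu_report_death(), cpu_check_up_prepare(), cpu_set_state_online(); see the include/linux/cpu.h and kernel/smpboot.c hunks below). As a rough, non-authoritative sketch of how an architecture port might wire these up (modeled on the blackfin and metag conversions in this merge; arch_halt_self() is a made-up placeholder for the arch-specific "park this CPU" operation):

/*
 * Illustrative sketch only -- not part of this merge.
 */
#include <linux/cpu.h>
#include <linux/printk.h>
#include <linux/sched.h>

extern void arch_halt_self(void);	/* hypothetical arch-specific halt */

/* Runs on a surviving CPU: wait up to five seconds for @cpu to die. */
int __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: unable to kill\n", cpu);
		return -EBUSY;
	}
	return 0;
}

/* Runs on the outgoing CPU, late in the idle loop. */
void cpu_die(void)
{
	idle_task_exit();
	(void)cpu_report_death();	/* tell the surviving CPU we are gone */
	arch_halt_self();		/* placeholder: should not return */
}

The point of the design is that the outgoing CPU only performs an atomic state update, so the surviving CPU can poll without per-CPU completions or extra memory barriers in arch code.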
@@ -2968,6 +2968,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Set maximum number of finished RCU callbacks to
 			process in one batch.
 
+	rcutree.gp_init_delay=	[KNL]
+			Set the number of jiffies to delay each step of
+			RCU grace-period initialization.  This only has
+			effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT is
+			set.
+
 	rcutree.rcu_fanout_leaf= [KNL]
 			Increase the number of CPUs assigned to each
 			leaf rcu_node structure.  Useful for very large
......
@@ -413,16 +413,14 @@ int __cpu_disable(void)
 	return 0;
 }
 
-static DECLARE_COMPLETION(cpu_killed);
-
 int __cpu_die(unsigned int cpu)
 {
-	return wait_for_completion_timeout(&cpu_killed, 5000);
+	return cpu_wait_death(cpu, 5);
 }
 
 void cpu_die(void)
 {
-	complete(&cpu_killed);
+	(void)cpu_report_death();
 
 	atomic_dec(&init_mm.mm_users);
 	atomic_dec(&init_mm.mm_count);
......
@@ -261,7 +261,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static DECLARE_COMPLETION(cpu_killed);
 
 /*
  * __cpu_disable runs on the processor to be shutdown.
@@ -299,7 +298,7 @@ int __cpu_disable(void)
  */
 void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
+	if (!cpu_wait_death(cpu, 1))
 		pr_err("CPU%u: unable to kill\n", cpu);
 }
 
@@ -314,7 +313,7 @@ void cpu_die(void)
 	local_irq_disable();
 	idle_task_exit();
 
-	complete(&cpu_killed);
+	(void)cpu_report_death();
 
 	asm ("XOR	TXENABLE, D0Re0,D0Re0\n");
 }
......
@@ -34,8 +34,6 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 #endif
 #endif
 
-DECLARE_PER_CPU(int, cpu_state);
-
 int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
@@ -150,12 +150,12 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 }
 
 void cpu_disable_common(void);
-void cpu_die_common(unsigned int cpu);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
+int common_cpu_die(unsigned int cpu);
 void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
......
@@ -77,9 +77,6 @@
 #include <asm/realmode.h>
 #include <asm/misc.h>
 
-/* State of each CPU */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
@@ -257,7 +254,7 @@ static void notrace start_secondary(void *unused)
 	lock_vector_lock();
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	cpu_set_state_online(smp_processor_id());
 	x86_platform.nmi_init();
 
 	/* enable local interrupts */
@@ -948,7 +945,10 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 */
 	mtrr_save_state();
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/* x86 CPUs take themselves offline, so delayed offline is OK. */
+	err = cpu_check_up_prepare(cpu);
+	if (err && err != -EBUSY)
+		return err;
 
 	/* the FPU context is blank, nobody can own it */
 	__cpu_disable_lazy_restore(cpu);
@@ -1191,7 +1191,7 @@ void __init native_smp_prepare_boot_cpu(void)
 	switch_to_new_gdt(me);
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
-	per_cpu(cpu_state, me) = CPU_ONLINE;
+	cpu_set_state_online(me);
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
@@ -1318,14 +1318,10 @@ static void __ref remove_cpu_from_maps(int cpu)
 	numa_remove_cpu(cpu);
 }
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
 
-	init_completion(&per_cpu(die_complete, smp_processor_id()));
-
 	remove_siblinginfo(cpu);
 
 	/* It's now safe to remove this processor from the online map */
@@ -1349,24 +1345,27 @@ int native_cpu_disable(void)
 	return 0;
 }
 
-void cpu_die_common(unsigned int cpu)
+int common_cpu_die(unsigned int cpu)
 {
-	wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
-}
-
-void native_cpu_die(unsigned int cpu)
-{
+	int ret = 0;
+
 	/* We don't do anything here: idle task is faking death itself. */
-	cpu_die_common(cpu);
 
 	/* They ack this in play_dead() by setting CPU_DEAD */
-	if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+	if (cpu_wait_death(cpu, 5)) {
 		if (system_state == SYSTEM_RUNNING)
 			pr_info("CPU %u is now offline\n", cpu);
 	} else {
 		pr_err("CPU %u didn't die...\n", cpu);
+		ret = -1;
 	}
+
+	return ret;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	common_cpu_die(cpu);
 }
 
 void play_dead_common(void)
@@ -1375,10 +1374,8 @@ void play_dead_common(void)
 	reset_lazy_tlbstate();
 	amd_e400_remove_cpu(raw_smp_processor_id());
 
-	mb();
 	/* Ack it */
-	__this_cpu_write(cpu_state, CPU_DEAD);
-	complete(&per_cpu(die_complete, smp_processor_id()));
+	(void)cpu_report_death();
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
......
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
 	set_cpu_online(cpu, true);
 
-	this_cpu_write(cpu_state, CPU_ONLINE);
-
-	wmb();
+	cpu_set_state_online(cpu);  /* Implies full memory barrier. */
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
-
-	wmb();			/* make sure everything is out */
 }
@@ -459,7 +455,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+	rc = cpu_check_up_prepare(cpu);
+	if (rc)
+		return rc;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -479,10 +481,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
-	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+	while (cpu_report_state(cpu) != CPU_ONLINE)
 		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-		barrier();
-	}
 
 	return 0;
 }
@@ -511,11 +511,11 @@ static void xen_cpu_die(unsigned int cpu)
 		schedule_timeout(HZ/10);
 	}
 
-	cpu_die_common(cpu);
-
-	xen_smp_intr_free(cpu);
-	xen_uninit_lock_cpu(cpu);
-	xen_teardown_timer(cpu);
+	if (common_cpu_die(cpu) == 0) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
+	}
 }
 
 static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -747,6 +747,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
 	/*
 	 * xen_smp_intr_init() needs to run before native_cpu_up()
 	 * so that IPI vectors are set up on the booting CPU before
@@ -768,12 +778,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return rc;
 }
 
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
-	xen_cpu_die(cpu);
-	native_cpu_die(cpu);
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
@@ -781,7 +785,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_up = xen_hvm_cpu_up;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
 	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
......
@@ -95,6 +95,10 @@ enum {
 					* Called on the new cpu, just before
 					* enabling interrupts. Must not sleep,
 					* must not fail */
+#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
+					* idle loop. */
+#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
+					* perhaps due to preemption. */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
  * operation in progress
@@ -271,4 +275,14 @@ void arch_cpu_idle_enter(void);
 void arch_cpu_idle_exit(void);
 void arch_cpu_idle_dead(void);
 
+DECLARE_PER_CPU(bool, cpu_dead_idle);
+
+int cpu_report_state(int cpu);
+int cpu_check_up_prepare(int cpu);
+void cpu_set_state_online(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+bool cpu_wait_death(unsigned int cpu, int seconds);
+bool cpu_report_death(void);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
 #endif /* _LINUX_CPU_H_ */
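On the bring-up side, the cpu_check_up_prepare()/cpu_set_state_online() pair declared just above lets an architecture detect a CPU whose previous offlining timed out before onlining it again. The following is a hedged sketch only, loosely following the x86 native_cpu_up() change earlier in this diff; arch_boot_secondary() is a hypothetical placeholder, and the -EBUSY handling assumes an architecture whose CPUs take themselves offline:

/*
 * Illustrative sketch only -- not part of this merge.
 */
#include <linux/cpu.h>
#include <linux/sched.h>

extern int arch_boot_secondary(unsigned int cpu, struct task_struct *idle);	/* hypothetical */

int example_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int err;

	/*
	 * -EBUSY means the previous offline timed out but the CPU did
	 * eventually report death; proceeding is safe when CPUs take
	 * themselves offline (see the x86 hunk above).
	 */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	err = arch_boot_secondary(cpu, idle);	/* hypothetical: start the CPU */
	if (err)
		return err;

	/* The incoming CPU itself calls cpu_set_state_online() once it is up. */
	return 0;
}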
@@ -531,8 +531,13 @@ do { \
 # define might_lock_read(lock) do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_LOCKDEP
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
+#else
+static inline void
+lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+{
+}
 #endif
 
 #endif /* __LINUX_LOCKDEP_H */
@@ -48,6 +48,26 @@
 extern int rcu_expedited; /* for sysctl */
 
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
+{
+	return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
 enum rcutorture_type {
 	RCU_FLAVOR,
 	RCU_BH_FLAVOR,
@@ -195,6 +215,15 @@ void call_rcu_sched(struct rcu_head *head,
 void synchronize_sched(void);
 
+/*
+ * Structure allowing asynchronous waiting on RCU.
+ */
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+void wakeme_after_rcu(struct rcu_head *head);
+
 /**
  * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
  * @head: structure to be used for queueing the RCU updates.
@@ -258,6 +287,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
+void rcu_end_inkernel_boot(void);
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
@@ -266,6 +296,8 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+int rcu_cpu_notify(struct notifier_block *self,
+		   unsigned long action, void *hcpu);
 
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
@@ -720,7 +752,7 @@ static inline void rcu_preempt_sleep_check(void)
  * annotated as __rcu.
  */
 #define rcu_dereference_check(p, c) \
-	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
 
 /**
  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
@@ -730,7 +762,7 @@ static inline void rcu_preempt_sleep_check(void)
  * This is the RCU-bh counterpart to rcu_dereference_check().
  */
 #define rcu_dereference_bh_check(p, c) \
-	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
 
 /**
  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -740,7 +772,7 @@ static inline void rcu_preempt_sleep_check(void)
  * This is the RCU-sched counterpart to rcu_dereference_check().
  */
 #define rcu_dereference_sched_check(p, c) \
-	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
 				__rcu)
 
 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
@@ -933,9 +965,9 @@ static inline void rcu_read_unlock(void)
 {
 	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
-	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
+	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
 }
 
 /**
......
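The rcu_expedite_gp()/rcu_unexpedite_gp() declarations added above (gpexp.2015.02.26a) give in-kernel users a nestable way to make synchronize_rcu() and friends behave like their _expedited() counterparts until the matching un-expedite call. A hedged usage sketch follows; the enclosing function and do_urgent_reconfiguration() are illustrative assumptions, not code from this merge:

/*
 * Illustrative sketch only -- not part of this merge.
 */
#include <linux/rcupdate.h>

extern void do_urgent_reconfiguration(void);	/* hypothetical caller-supplied work */

static void example_urgent_update(void)
{
	rcu_expedite_gp();	/* nestable; must be paired with rcu_unexpedite_gp() */

	do_urgent_reconfiguration();
	synchronize_rcu();	/* now behaves like synchronize_rcu_expedited() */

	rcu_unexpedite_gp();	/* undo exactly once per rcu_expedite_gp() */
}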
@@ -182,7 +182,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
  * lockdep_is_held() calls.
  */
 #define srcu_dereference_check(p, sp, c) \
-	__rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+	__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
 
 /**
  * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
......
@@ -791,6 +791,19 @@ config RCU_NOCB_CPU_ALL
 endchoice
 
+config RCU_EXPEDITE_BOOT
+	bool
+	default n
+	help
+	  This option enables expedited grace periods at boot time,
+	  as if rcu_expedite_gp() had been invoked early in boot.
+	  The corresponding rcu_unexpedite_gp() is invoked from
+	  rcu_end_inkernel_boot(), which is intended to be invoked
+	  at the end of the kernel-only boot sequence, just before
+	  init is exec'ed.
+
+	  Accept the default if unsure.
+
 endmenu # "RCU Subsystem"
 
 config BUILD_BIN2C
......
@@ -408,8 +408,10 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	 *
 	 * Wait for the stop thread to go away.
 	 */
-	while (!idle_cpu(cpu))
+	while (!per_cpu(cpu_dead_idle, cpu))
 		cpu_relax();
+	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
+	per_cpu(cpu_dead_idle, cpu) = false;
 
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
......
@@ -853,6 +853,8 @@ rcu_torture_fqs(void *arg)
 static int
 rcu_torture_writer(void *arg)
 {
+	bool can_expedite = !rcu_gp_is_expedited();
+	int expediting = 0;
 	unsigned long gp_snap;
 	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
 	bool gp_sync1 = gp_sync;
@@ -865,9 +867,15 @@ rcu_torture_writer(void *arg)
 	int nsynctypes = 0;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
+	pr_alert("%s" TORTURE_FLAG
+		 " Grace periods expedited from boot/sysfs for %s,\n",
+		 torture_type, cur_ops->name);
+	pr_alert("%s" TORTURE_FLAG
+		 " Testing of dynamic grace-period expediting diabled.\n",
+		 torture_type);
 
 	/* Initialize synctype[] array.  If none set, take default. */
-	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync)
+	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
 		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
 	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
 		synctype[nsynctypes++] = RTWS_COND_GET;
@@ -949,9 +957,26 @@
 			}
 		}
 		rcutorture_record_progress(++rcu_torture_current_version);
+		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
+		if (can_expedite &&
+		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
+			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
+			if (expediting >= 0)
+				rcu_expedite_gp();
+			else
+				rcu_unexpedite_gp();
+			if (++expediting > 3)
+				expediting = -expediting;
+		}
 		rcu_torture_writer_state = RTWS_STUTTER;
 		stutter_wait("rcu_torture_writer");
 	} while (!torture_must_stop());
+	/* Reset expediting back to unexpedited. */
+	if (expediting > 0)
+		expediting = -expediting;
+	while (can_expedite && expediting++ < 0)
+		rcu_unexpedite_gp();
+	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
 	rcu_torture_writer_state = RTWS_STOPPING;
 	torture_kthread_stopping("rcu_torture_writer");
 	return 0;
......
@@ -402,23 +402,6 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 }
 EXPORT_SYMBOL_GPL(call_srcu);
 
-struct rcu_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
-
-/*
- * Awaken the corresponding synchronize_srcu() instance now that a
- * grace period has elapsed.
- */
-static void wakeme_after_rcu(struct rcu_head *head)
-{
-	struct rcu_synchronize *rcu;
-
-	rcu = container_of(head, struct rcu_synchronize, head);
-	complete(&rcu->completion);
-}
-
 static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
 static void srcu_reschedule(struct srcu_struct *sp);
@@ -507,7 +490,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  */
 void synchronize_srcu(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, rcu_expedited
+	__synchronize_srcu(sp, rcu_gp_is_expedited()
 		       ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
 		       : SYNCHRONIZE_SRCU_TRYCOUNT);
 }
......
@@ -103,8 +103,7 @@ EXPORT_SYMBOL(__rcu_is_watching);
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
 	RCU_TRACE(reset_cpu_stall_ticks(rcp));
-	if (rcp->rcucblist != NULL &&
-	    rcp->donetail != rcp->curtail) {
+	if (rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
 		return 1;
 	}
@@ -169,17 +168,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	unsigned long flags;
 	RCU_TRACE(int cb_count = 0);
 
-	/* If no RCU callbacks ready to invoke, just return. */
-	if (&rcp->rcucblist == rcp->donetail) {
-		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
-		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
-					      !!ACCESS_ONCE(rcp->rcucblist),
-					      need_resched(),
-					      is_idle_task(current),
-					      false));
-		return;
-	}
-
 	/* Move the ready-to-invoke callbacks to a local list. */
 	local_irq_save(flags);
 	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
......
[This file's diff is collapsed and not shown.]
@@ -141,12 +141,20 @@ struct rcu_node {
 				/*  complete (only for PREEMPT_RCU). */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
+				/*  Initialized from ->qsmaskinitnext at the */
+				/*  beginning of each grace period. */
+	unsigned long qsmaskinitnext;
+				/* Online CPUs for next grace period. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 				/*  Only one bit will be set in this mask. */
 	int	grplo;		/* lowest-numbered CPU or group here. */
 	int	grphi;		/* highest-numbered CPU or group here. */
 	u8	grpnum;		/* CPU/group number for next level up. */
 	u8	level;		/* root is at level 0. */
+	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
+				/*  exit RCU read-side critical sections */
+				/*  before propagating offline up the */
+				/*  rcu_node tree? */
 	struct rcu_node *parent;
 	struct list_head blkd_tasks;
 				/* Tasks blocked in RCU read-side critical */
@@ -448,8 +456,6 @@ struct rcu_state {
 	long qlen;				/* Total number of callbacks. */
 						/* End of fields guarded by orphan_lock. */
 
-	struct mutex onoff_mutex;		/* Coordinate hotplug & GPs. */
-
 	struct mutex barrier_mutex;		/* Guards barrier fields. */
 	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
 	struct completion barrier_completion;	/* Wake at barrier end. */
@@ -559,6 +565,7 @@ static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(void);
 static void rcu_prepare_for_idle(void);
 static void rcu_idle_count_callbacks_posted(void);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void print_cpu_stall_info_begin(void);
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
......
[This file's diff is collapsed and not shown.]
@@ -283,8 +283,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 			seq_puts(m, "\n");
 			level = rnp->level;
 		}
-		seq_printf(m, "%lx/%lx %c%c>%c %d:%d ^%d ",
-			   rnp->qsmask, rnp->qsmaskinit,
+		seq_printf(m, "%lx/%lx->%lx %c%c>%c %d:%d ^%d ",
+			   rnp->qsmask, rnp->qsmaskinit, rnp->qsmaskinitnext,
 			   ".G"[rnp->gp_tasks != NULL],
 			   ".E"[rnp->exp_tasks != NULL],
 			   ".T"[!list_empty(&rnp->blkd_tasks)],
......
@@ -62,6 +62,63 @@ MODULE_ALIAS("rcupdate");
 module_param(rcu_expedited, int, 0);
 
+#ifndef CONFIG_TINY_RCU
+
+static atomic_t rcu_expedited_nesting =
+	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
+
+/*
+ * Should normal grace-period primitives be expedited?  Intended for
+ * use within RCU.  Note that this function takes the rcu_expedited
+ * sysfs/boot variable into account as well as the rcu_expedite_gp()
+ * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
+ * returns false is a -really- bad idea.
+ */
+bool rcu_gp_is_expedited(void)
+{
+	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
+
+/**
+ * rcu_expedite_gp - Expedite future RCU grace periods
+ *
+ * After a call to this function, future calls to synchronize_rcu() and
+ * friends act as the corresponding synchronize_rcu_expedited() function
+ * had instead been called.
+ */
+void rcu_expedite_gp(void)
+{
+	atomic_inc(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_expedite_gp);
+
+/**
+ * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
+ *
+ * Undo a prior call to rcu_expedite_gp().  If all prior calls to
+ * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
+ * and if the rcu_expedited sysfs/boot parameter is not set, then all
+ * subsequent calls to synchronize_rcu() and friends will return to
+ * their normal non-expedited behavior.
+ */
+void rcu_unexpedite_gp(void)
+{
+	atomic_dec(&rcu_expedited_nesting);
+}
+EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+
+#endif /* #ifndef CONFIG_TINY_RCU */
+
+/*
+ * Inform RCU of the end of the in-kernel boot sequence.
+ */
+void rcu_end_inkernel_boot(void)
+{
+	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
+		rcu_unexpedite_gp();
+}
+
 #ifdef CONFIG_PREEMPT_RCU
 
 /*
@@ -199,16 +256,13 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-struct rcu_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
-
-/*
- * Awaken the corresponding synchronize_rcu() instance now that a
- * grace period has elapsed.
+/**
+ * wakeme_after_rcu() - Callback function to awaken a task after grace period
+ * @head: Pointer to rcu_head member within rcu_synchronize structure
+ *
+ * Awaken the corresponding task now that a grace period has elapsed.
  */
-static void wakeme_after_rcu(struct rcu_head *head)
+void wakeme_after_rcu(struct rcu_head *head)
 {
 	struct rcu_synchronize *rcu;
......
@@ -198,6 +198,8 @@ static void cpuidle_idle_call(void)
 	start_critical_timings();
 }
 
+DEFINE_PER_CPU(bool, cpu_dead_idle);
+
 /*
  * Generic idle loop implementation
  *
@@ -222,8 +224,13 @@ static void cpu_idle_loop(void)
 		check_pgt_cache();
 		rmb();
 
-		if (cpu_is_offline(smp_processor_id()))
+		if (cpu_is_offline(smp_processor_id())) {
+			rcu_cpu_notify(NULL, CPU_DYING_IDLE,
+				       (void *)(long)smp_processor_id());
+			smp_mb(); /* all activity before dead. */
+			this_cpu_write(cpu_dead_idle, true);
 			arch_cpu_idle_dead();
+		}
 
 		local_irq_disable();
 		arch_cpu_idle_enter();
......
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -314,3 +315,158 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
+
+static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
+
+/*
+ * Called to poll specified CPU's state, for example, when waiting for
+ * a CPU to come online.
+ */
+int cpu_report_state(int cpu)
+{
+	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+}
+
+/*
+ * If CPU has died properly, set its state to CPU_UP_PREPARE and
+ * return success.  Otherwise, return -EBUSY if the CPU died after
+ * cpu_wait_death() timed out.  And yet otherwise again, return -EAGAIN
+ * if cpu_wait_death() timed out and the CPU still hasn't gotten around
+ * to dying.  In the latter two cases, the CPU might not be set up
+ * properly, but it is up to the arch-specific code to decide.
+ * Finally, -EIO indicates an unanticipated problem.
+ *
+ * Note that it is permissible to omit this call entirely, as is
+ * done in architectures that do no CPU-hotplug error checking.
+ */
+int cpu_check_up_prepare(int cpu)
+{
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
+		return 0;
+	}
+
+	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {
+	case CPU_POST_DEAD:
+		/* The CPU died properly, so just start it up again. */
+		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
+		return 0;
+	case CPU_DEAD_FROZEN:
+		/*
+		 * Timeout during CPU death, so let caller know.
+		 * The outgoing CPU completed its processing, but after
+		 * cpu_wait_death() timed out and reported the error. The
+		 * caller is free to proceed, in which case the state
+		 * will be reset properly by cpu_set_state_online().
+		 * Proceeding despite this -EBUSY return makes sense
+		 * for systems where the outgoing CPUs take themselves
+		 * offline, with no post-death manipulation required from
+		 * a surviving CPU.
+		 */
+		return -EBUSY;
+	case CPU_BROKEN:
+		/*
+		 * The most likely reason we got here is that there was
+		 * a timeout during CPU death, and the outgoing CPU never
+		 * did complete its processing.  This could happen on
+		 * a virtualized system if the outgoing VCPU gets preempted
+		 * for more than five seconds, and the user attempts to
+		 * immediately online that same CPU.  Trying again later
+		 * might return -EBUSY above, hence -EAGAIN.
+		 */
+		return -EAGAIN;
+	default:
+		/* Should not happen.  Famous last words. */
+		return -EIO;
+	}
+}
+
+/*
+ * Mark the specified CPU online.
+ *
+ * Note that it is permissible to omit this call entirely, as is
+ * done in architectures that do no CPU-hotplug error checking.
+ */
+void cpu_set_state_online(int cpu)
+{
+	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Wait for the specified CPU to exit the idle loop and die.
+ */
+bool cpu_wait_death(unsigned int cpu, int seconds)
+{
+	int jf_left = seconds * HZ;
+	int oldstate;
+	bool ret = true;
+	int sleep_jf = 1;
+
+	might_sleep();
+
+	/* The outgoing CPU will normally get done quite quickly. */
+	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
+		goto update_state;
+	udelay(5);
+
+	/* But if the outgoing CPU dawdles, wait increasingly long times. */
+	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
+		schedule_timeout_uninterruptible(sleep_jf);
+		jf_left -= sleep_jf;
+		if (jf_left <= 0)
+			break;
+		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
+	}
+update_state:
+	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+	if (oldstate == CPU_DEAD) {
+		/* Outgoing CPU died normally, update state. */
+		smp_mb(); /* atomic_read() before update. */
+		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
+	} else {
+		/* Outgoing CPU still hasn't died, set state accordingly. */
+		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+				   oldstate, CPU_BROKEN) != oldstate)
+			goto update_state;
+		ret = false;
+	}
+	return ret;
+}
+
+/*
+ * Called by the outgoing CPU to report its successful death.  Return
+ * false if this report follows the surviving CPU's timing out.
+ *
+ * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
+ * timed out.  This approach allows architectures to omit calls to
+ * cpu_check_up_prepare() and cpu_set_state_online() without defeating
+ * the next cpu_wait_death()'s polling loop.
+ */
+bool cpu_report_death(void)
+{
+	int oldstate;
+	int newstate;
+	int cpu = smp_processor_id();
+
+	do {
+		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+		if (oldstate != CPU_BROKEN)
+			newstate = CPU_DEAD;
+		else
+			newstate = CPU_DEAD_FROZEN;
+	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+				oldstate, newstate) != oldstate);
+	return newstate == CPU_DEAD;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1180,16 +1180,7 @@ config DEBUG_CREDENTIALS
 menu "RCU Debugging"
 
 config PROVE_RCU
-	bool "RCU debugging: prove RCU correctness"
-	depends on PROVE_LOCKING
-	default n
-	help
-	 This feature enables lockdep extensions that check for correct
-	 use of RCU APIs.  This is currently under development.  Say Y
-	 if you want to debug RCU usage or help work on the PROVE_RCU
-	 feature.
-
-	 Say N if you are unsure.
+	def_bool PROVE_LOCKING
 
 config PROVE_RCU_REPEATEDLY
 	bool "RCU debugging: don't disable PROVE_RCU on first splat"
@@ -1257,6 +1248,30 @@ config RCU_TORTURE_TEST_RUNNABLE
 	  Say N here if you want the RCU torture tests to start only
 	  after being manually enabled via /proc.
 
+config RCU_TORTURE_TEST_SLOW_INIT
+	bool "Slow down RCU grace-period initialization to expose races"
+	depends on RCU_TORTURE_TEST
+	help
+	  This option makes grace-period initialization block for a
+	  few jiffies between initializing each pair of consecutive
+	  rcu_node structures.  This helps to expose races involving
+	  grace-period initialization, in other words, it makes your
+	  kernel less stable.  It can also greatly increase grace-period
+	  latency, especially on systems with large numbers of CPUs.
+	  This is useful when torture-testing RCU, but in almost no
+	  other circumstance.
+
+	  Say Y here if you want your system to crash and hang more often.
+	  Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_INIT_DELAY
+	int "How much to slow down RCU grace-period initialization"
+	range 0 5
+	default 3
+	help
+	  This option specifies the number of jiffies to wait between
+	  each rcu_node structure initialization.
+
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on RCU_STALL_COMMON
......
@@ -310,7 +310,7 @@ function dump(first, pastlast)
 		cfr[jn] = cf[j] "." cfrep[cf[j]];
 	}
 	if (cpusr[jn] > ncpus && ncpus != 0)
-		ovf = "(!)";
+		ovf = "-ovf";
 	else
 		ovf = "";
 	print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
......
 CONFIG_RCU_TORTURE_TEST=y
 CONFIG_PRINTK_TIME=y
+CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y