Commit 35dc0352 authored by Linus Torvalds

Merge tag 'rcu.2022.03.13a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull RCU updates from Paul McKenney:

 - Fix idle detection (Neeraj Upadhyay) and missing access marking
   detected by KCSAN.

 - Reduce coupling between rcu_barrier() and CPU-hotplug operations, so
   that rcu_barrier() no longer needs to do cpus_read_lock(). This may
   also someday allow system boot to bring CPUs online concurrently.

 - Enable more aggressive movement to per-CPU queueing when reacting to
   excessive lock contention due to workloads placing heavy update-side
   stress on RCU tasks.

 - Improvements to RCU priority boosting, including changes from Neeraj
   Upadhyay, Zqiang, and Alison Chaiken.

 - Various fixes improving test robustness and debug information.

 - Add tests for SRCU size transitions, further compress torture.sh
   build products, and improve debug output.

 - Miscellaneous fixes.

* tag 'rcu.2022.03.13a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (49 commits)
  rcu: Replace cpumask_weight with cpumask_empty where appropriate
  rcu: Remove __read_mostly annotations from rcu_scheduler_active externs
  rcu: Uninline multi-use function: finish_rcuwait()
  rcu: Mark writes to the rcu_segcblist structure's ->flags field
  kasan: Record work creation stack trace with interrupts enabled
  rcu: Inline __call_rcu() into call_rcu()
  rcu: Add mutex for rcu boost kthread spawning and affinity setting
  rcu: Fix description of kvfree_rcu()
  MAINTAINERS:  Add Frederic and Neeraj to their RCU files
  rcutorture: Provide non-power-of-two Tasks RCU scenarios
  rcutorture: Test SRCU size transitions
  torture: Make torture.sh help message match reality
  rcu-tasks: Set ->percpu_enqueue_shift to zero upon contention
  rcu-tasks: Use order_base_2() instead of ilog2()
  rcu: Create and use an rcu_rdp_cpu_online()
  rcu: Make rcu_barrier() no longer block CPU-hotplug operations
  rcu: Rework rcu_barrier() and callback-migration logic
  rcu: Refactor rcu_barrier() empty-list handling
  rcu: Kill rnp->ofl_seq and use only rcu_state.ofl_lock for exclusion
  torture: Change KVM environment variable to RCUTORTURE
  ...
parents a04b1bf5 d5578190
@@ -4504,6 +4504,8 @@
 			(the least-favored priority). Otherwise, when
 			RCU_BOOST is not set, valid values are 0-99 and
 			the default is zero (non-realtime operation).
+			When RCU_NOCB_CPU is set, also adjust the
+			priority of NOCB callback kthreads.
 
 	rcutree.rcu_nocb_gp_stride= [KNL]
 			Set the number of NOCB callback kthreads in
...
@@ -16324,6 +16324,8 @@ F:	tools/testing/selftests/resctrl/
 
 READ-COPY UPDATE (RCU)
 M:	"Paul E. McKenney" <paulmck@kernel.org>
+M:	Frederic Weisbecker <frederic@kernel.org> (kernel/rcu/tree_nocb.h)
+M:	Neeraj Upadhyay <quic_neeraju@quicinc.com> (kernel/rcu/tasks.h)
 M:	Josh Triplett <josh@joshtriplett.org>
 R:	Steven Rostedt <rostedt@goodmis.org>
 R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
...
@@ -84,7 +84,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
@@ -924,7 +924,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  *
  *	kvfree_rcu(ptr);
  *
- * where @ptr is a pointer to kvfree().
+ * where @ptr is the pointer to be freed by kvfree().
  *
  * Please note, head-less way of freeing is permitted to
  * use from a context that has to follow might_sleep()
...
@@ -62,7 +62,7 @@ static inline void rcu_irq_exit_check_preempt(void) { }
 void exit_rcu(void);
 void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_end_inkernel_boot(void);
 bool rcu_inkernel_boot_has_ended(void);
 bool rcu_is_watching(void);
...
@@ -47,11 +47,7 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
 	rcu_assign_pointer(w->task, current);
 }
 
-static inline void finish_rcuwait(struct rcuwait *w)
-{
-	rcu_assign_pointer(w->task, NULL);
-	__set_current_state(TASK_RUNNING);
-}
+extern void finish_rcuwait(struct rcuwait *w);
 
 #define rcuwait_wait_event(w, condition, state)				\
 ({									\
...
@@ -794,16 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
  * Tracepoint for rcu_barrier() execution. The string "s" describes
  * the rcu_barrier phase:
  *	"Begin": rcu_barrier() started.
+ *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"EarlyExit": rcu_barrier() piggybacked, thus early exit.
  *	"Inc1": rcu_barrier() piggyback check counter incremented.
- *	"OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
- *	"OnlineQ": rcu_barrier() found online CPU with callbacks.
- *	"OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+ *	"Inc2": rcu_barrier() piggyback check counter incremented.
  *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
  *	"IRQNQ": An rcu_barrier_callback() callback found no callbacks.
- *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"LastCB": An rcu_barrier_callback() invoked the last callback.
- *	"Inc2": rcu_barrier() piggyback check counter incremented.
+ *	"NQ": rcu_barrier() found a CPU with no callbacks.
+ *	"OnlineQ": rcu_barrier() found online CPU with callbacks.
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  * is the count of remaining callbacks, and "done" is the piggybacking count.
  */
...
@@ -56,13 +56,13 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
 static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
 					   int flags)
 {
-	rsclp->flags |= flags;
+	WRITE_ONCE(rsclp->flags, rsclp->flags | flags);
 }
 
 static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
 					     int flags)
 {
-	rsclp->flags &= ~flags;
+	WRITE_ONCE(rsclp->flags, rsclp->flags & ~flags);
 }
 
 static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
...
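The two hunks above replace plain read-modify-write of ->flags with WRITE_ONCE() so that KCSAN treats concurrent lockless readers of the field as intentional rather than as data races; the updater still holds its lock, the marking only forbids the compiler from tearing or fusing the store. A minimal userspace sketch of the idiom, with simplified stand-ins for the kernel's WRITE_ONCE()/READ_ONCE() macros (the struct and flag names here are illustrative, not the kernel's):

/* Simplified stand-ins for the kernel's volatile-cast macros. */
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile const __typeof__(x) *)&(x))

struct segcblist {
	int flags;
};

#define SEGCBLIST_OFFLOADED 0x1

/* Updater: read-modify-write under the owner's lock, with the store
 * marked so a concurrent lockless reader is not a data race in
 * KCSAN's eyes. */
static void set_flags(struct segcblist *p, int flags)
{
	WRITE_ONCE(p->flags, p->flags | flags);
}

/* Lockless reader: the marked load pairs with the marked store. */
static int test_flags(struct segcblist *p, int flags)
{
	return READ_ONCE(p->flags) & flags;
}

int main(void)
{
	struct segcblist s = { .flags = 0 };

	set_flags(&s, SEGCBLIST_OFFLOADED);
	printf("offloaded=%d\n", test_flags(&s, SEGCBLIST_OFFLOADED));
	return 0;
}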
@@ -284,7 +284,7 @@ static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
 
-static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
+static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
 
 /*
  * Allocate an element from the rcu_tortures pool.
@@ -387,7 +387,7 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 	 * period, and we want a long delay occasionally to trigger
 	 * force_quiescent_state. */
 
-	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
+	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
 	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 		started = cur_ops->get_gp_seq();
 		ts = rcu_trace_clock_local();
@@ -674,6 +674,7 @@ static struct rcu_torture_ops srcu_ops = {
 	.call		= srcu_torture_call,
 	.cb_barrier	= srcu_torture_barrier,
 	.stats		= srcu_torture_stats,
+	.cbflood_max	= 50000,
 	.irq_capable	= 1,
 	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 	.name		= "srcu"
@@ -708,6 +709,7 @@ static struct rcu_torture_ops srcud_ops = {
 	.call		= srcu_torture_call,
 	.cb_barrier	= srcu_torture_barrier,
 	.stats		= srcu_torture_stats,
+	.cbflood_max	= 50000,
 	.irq_capable	= 1,
 	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
 	.name		= "srcud"
@@ -997,7 +999,7 @@ static int rcu_torture_boost(void *arg)
 			goto checkwait;
 
 		/* Wait for the next test interval. */
-		oldstarttime = boost_starttime;
+		oldstarttime = READ_ONCE(boost_starttime);
 		while (time_before(jiffies, oldstarttime)) {
 			schedule_timeout_interruptible(oldstarttime - jiffies);
 			if (stutter_wait("rcu_torture_boost"))
@@ -1041,10 +1043,11 @@ static int rcu_torture_boost(void *arg)
 		 * interval. Besides, we are running at RT priority,
 		 * so delays should be relatively rare.
 		 */
-		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
+		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
 			if (mutex_trylock(&boost_mutex)) {
 				if (oldstarttime == boost_starttime) {
-					boost_starttime = jiffies + test_boost_interval * HZ;
+					WRITE_ONCE(boost_starttime,
+						   jiffies + test_boost_interval * HZ);
 					n_rcu_torture_boosts++;
 				}
 				mutex_unlock(&boost_mutex);
@@ -1276,7 +1279,7 @@ rcu_torture_writer(void *arg)
 		boot_ended = rcu_inkernel_boot_has_ended();
 		stutter_waited = stutter_wait("rcu_torture_writer");
 		if (stutter_waited &&
-		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
+		    !atomic_read(&rcu_fwd_cb_nodelay) &&
 		    !cur_ops->slow_gps &&
 		    !torture_must_stop() &&
 		    boot_ended)
@@ -2180,7 +2183,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
 		if (rfp->n_launders_hist[i].n_launders > 0)
 			break;
-	mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
 	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
 		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
 	gps_old = rfp->rcu_launder_gp_seq_start;
@@ -2193,7 +2195,6 @@ static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
 		gps_old = gps;
 	}
 	pr_cont("\n");
-	mutex_unlock(&rcu_fwd_mutex);
 }
 
 /* Callback function for continuous-flood RCU callbacks. */
@@ -2281,6 +2282,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
 	unsigned long stopat;
 	static DEFINE_TORTURE_RANDOM(trs);
 
+	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
 	if (!cur_ops->sync)
 		return; // Cannot do need_resched() forward progress testing without ->sync.
 	if (cur_ops->call && cur_ops->cb_barrier) {
@@ -2289,7 +2291,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
 	}
 
 	/* Tight loop containing cond_resched(). */
-	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+	atomic_inc(&rcu_fwd_cb_nodelay);
 	cur_ops->sync(); /* Later readers see above write. */
 	if (selfpropcb) {
 		WRITE_ONCE(fcs.stop, 0);
@@ -2325,6 +2327,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
 	if (selfpropcb) {
 		WRITE_ONCE(fcs.stop, 1);
 		cur_ops->sync(); /* Wait for running CB to complete. */
+		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
 	}
 
@@ -2333,7 +2336,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
 		destroy_rcu_head_on_stack(&fcs.rh);
 	}
 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
-	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
+	atomic_dec(&rcu_fwd_cb_nodelay);
 }
 
 /* Carry out call_rcu() forward-progress testing. */
@@ -2353,13 +2356,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 	unsigned long stopat;
 	unsigned long stoppedat;
 
+	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
 	if (READ_ONCE(rcu_fwd_emergency_stop))
 		return; /* Get out of the way quickly, no GP wait! */
 	if (!cur_ops->call)
 		return; /* Can't do call_rcu() fwd prog without ->call. */
 
 	/* Loop continuously posting RCU callbacks. */
-	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+	atomic_inc(&rcu_fwd_cb_nodelay);
 	cur_ops->sync(); /* Later readers see above write. */
 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
@@ -2414,6 +2418,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 		n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
 		cver = READ_ONCE(rcu_torture_current_version) - cver;
 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
 		cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
 		(void)rcu_torture_fwd_prog_cbfree(rfp);
 
@@ -2427,11 +2432,13 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 			 n_launders, n_launders_sa,
 			 n_max_gps, n_max_cbs, cver, gps);
 		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
+		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
 		rcu_torture_fwd_cb_hist(rfp);
+		mutex_unlock(&rcu_fwd_mutex);
 	}
 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
-	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
+	atomic_dec(&rcu_fwd_cb_nodelay);
 }
 
@@ -2511,7 +2518,7 @@ static int rcu_torture_fwd_prog(void *args)
 			firsttime = false;
 			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
 		} else {
-			while (READ_ONCE(rcu_fwd_seq) == oldseq)
+			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
 				schedule_timeout_interruptible(1);
 			oldseq = READ_ONCE(rcu_fwd_seq);
 		}
@@ -2905,8 +2912,10 @@ rcu_torture_cleanup(void)
 	int i;
 
 	if (torture_cleanup_begin()) {
-		if (cur_ops->cb_barrier != NULL)
+		if (cur_ops->cb_barrier != NULL) {
+			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
 			cur_ops->cb_barrier();
+		}
 		return;
 	}
 	if (!cur_ops) {
@@ -2961,8 +2970,10 @@ rcu_torture_cleanup(void)
 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
 	 * cleanup operations.
 	 */
-	if (cur_ops->cb_barrier != NULL)
+	if (cur_ops->cb_barrier != NULL) {
+		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
 		cur_ops->cb_barrier();
+	}
 	if (cur_ops->cleanup != NULL)
 		cur_ops->cleanup();
...
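A recurring change in the rcutorture hunks above is rcu_fwd_cb_nodelay going from bool to atomic_t, with WRITE_ONCE(..., true/false) becoming atomic_inc()/atomic_dec(). With several forward-progress kthreads running concurrently, a plain bool would let the first kthread to finish clear the flag while the others still need it; a counter composes. A small C11-atomics sketch of the same shape (the variable name mirrors the diff, the rest is illustrative):

/* C11 atomics stand in for the kernel's atomic_t API. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int fwd_cb_nodelay;	/* was: static bool ... */

static void fwd_prog_begin(void) { atomic_fetch_add(&fwd_cb_nodelay, 1); }
static void fwd_prog_end(void)   { atomic_fetch_sub(&fwd_cb_nodelay, 1); }

/* Readers ask "is any forward-progress test currently running?" */
static int nodelay(void) { return atomic_load(&fwd_cb_nodelay) != 0; }

int main(void)
{
	fwd_prog_begin();			/* kthread A starts */
	fwd_prog_begin();			/* kthread B starts */
	fwd_prog_end();				/* A finishes ... */
	printf("nodelay=%d\n", nodelay());	/* ... B's request survives: 1 */
	fwd_prog_end();
	printf("nodelay=%d\n", nodelay());	/* now 0 */
	return 0;
}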
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name =					\
 	.call_func = call,						\
 	.rtpcpu = &rt_name ## __percpu,					\
 	.name = n,							\
-	.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1,		\
+	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),		\
 	.percpu_enqueue_lim = 1,					\
 	.percpu_dequeue_lim = 1,					\
 	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),\
@@ -302,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 	if (unlikely(needadjust)) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
-			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
+			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
 			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
 			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
 			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -417,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 		if (rtp->percpu_enqueue_lim > 1) {
-			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
+			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
...
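The ilog2() to order_base_2() substitution matters because ilog2() rounds down: ilog2(n) + 1 equals ceil(log2(n)) for non-powers of two, but overshoots by one exactly when n is a power of two. That is also why this series adds non-power-of-two rcutorture scenarios (the CONFIG_NR_CPUS=3 and CONFIG_NR_CPUS=5 config changes near the end). A quick userspace check with stand-in helpers for the kernel macros:

/* Userspace stand-ins: ilog2() is floor(log2(n)), order_base_2() is
 * ceil(log2(n)) with order_base_2(1) == 0, as in the kernel. */
#include <stdio.h>

static int ilog2_(unsigned int n)	/* floor(log2(n)), n > 0 */
{
	int r = -1;

	while (n) {
		r++;
		n >>= 1;
	}
	return r;
}

static int order_base_2_(unsigned int n)	/* ceil(log2(n)) */
{
	return n <= 1 ? 0 : ilog2_(n - 1) + 1;
}

int main(void)
{
	for (unsigned int n = 2; n <= 8; n++)
		printf("n=%u  ilog2(n)+1=%d  order_base_2(n)=%d\n",
		       n, ilog2_(n) + 1, order_base_2_(n));
	/* Divergence only at powers of two: n=4 gives 3 vs 2, n=8 gives 4 vs 3. */
	return 0;
}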
... (one large diff collapsed)
@@ -56,8 +56,6 @@ struct rcu_node {
 				/*  Initialized from ->qsmaskinitnext at the */
 				/*  beginning of each grace period. */
 	unsigned long qsmaskinitnext;
-	unsigned long ofl_seq;	/* CPU-hotplug operation sequence count. */
-				/* Online CPUs for next grace period. */
 	unsigned long expmask;	/* CPUs or groups that need to check in */
 				/*  to allow the current expedited GP */
 				/*  to complete. */
@@ -110,6 +108,9 @@ struct rcu_node {
 				/*  side effect, not as a lock. */
 	unsigned long boost_time;
 				/* When to start boosting (jiffies). */
+	struct mutex boost_kthread_mutex;
+				/* Exclusion for thread spawning and affinity */
+				/*  manipulation. */
 	struct task_struct *boost_kthread_task;
 				/* kthread that takes care of priority */
 				/*  boosting for this rcu_node structure. */
@@ -190,6 +191,7 @@ struct rcu_data {
 	bool rcu_forced_tick_exp;	/* ... provide QS to expedited GP. */
 
 	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
+	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
 	struct rcu_head barrier_head;
 	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
@@ -203,6 +205,8 @@ struct rcu_data {
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
 	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
+	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
+					    /* spawning */
 
 	/* The following fields are used by call_rcu, hence own cacheline. */
 	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
@@ -237,6 +241,7 @@ struct rcu_data {
 					/* rcuc per-CPU kthread or NULL. */
 	unsigned int rcu_cpu_kthread_status;
 	char rcu_cpu_has_work;
+	unsigned long rcuc_activity;
 
 	/* 7) Diagnostic data, including RCU CPU stall warnings. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
@@ -302,9 +307,8 @@ struct rcu_state {
 	/* The following fields are guarded by the root rcu_node's lock. */
 
-	u8	boost ____cacheline_internodealigned_in_smp;
-						/* Subject to priority boost. */
-	unsigned long gp_seq;			/* Grace-period sequence #. */
+	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
+						/* Grace-period sequence #. */
 	unsigned long gp_max;			/* Maximum GP duration in */
 						/*  jiffies. */
 	struct task_struct *gp_kthread;		/* Task for grace periods. */
@@ -323,6 +327,8 @@ struct rcu_state {
 						/*  rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
+	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */
+
 	struct mutex exp_mutex;			/* Serialize expedited GP. */
 	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
 	unsigned long expedited_sequence;	/* Take a ticket. */
@@ -355,7 +361,7 @@ struct rcu_state {
 	const char *name;			/* Name of structure. */
 	char abbr;				/* Abbreviated name. */
 
-	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
 						/*  GP pre-initialization. */
 };
...
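The new ->barrier_seq_snap field and barrier_lock support the rcu_barrier() rework summarized in the merge message: each CPU's rcu_data records the barrier round it has already been handled for, so rcu_barrier() can decide per CPU whether a callback still needs to be posted without holding the CPU-hotplug lock. A much-simplified, single-threaded sketch of the snapshot-compare idiom (illustrative only; this is not the kernel's actual control flow, and the names are borrowed for flavor):

/* Snapshot-compare sketch: a CPU whose snapshot already matches the
 * current barrier sequence has nothing to contribute to this round
 * and can be skipped. */
#include <stdio.h>

#define NCPU 4

static unsigned long barrier_sequence;		/* global round counter */
static unsigned long barrier_seq_snap[NCPU];	/* per-CPU snapshot */
static int have_cbs[NCPU];			/* CPU has queued callbacks? */

static void rcu_barrier_round(void)
{
	barrier_sequence++;			/* open a new round */
	for (int cpu = 0; cpu < NCPU; cpu++) {
		if (barrier_seq_snap[cpu] == barrier_sequence)
			continue;		/* already handled this round */
		if (have_cbs[cpu])
			printf("post barrier callback on CPU %d\n", cpu);
		barrier_seq_snap[cpu] = barrier_sequence; /* mark done */
	}
}

int main(void)
{
	have_cbs[1] = have_cbs[3] = 1;
	rcu_barrier_round();
	return 0;
}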
@@ -502,7 +502,8 @@ static void synchronize_rcu_expedited_wait(void)
 		if (synchronize_rcu_expedited_wait_once(1))
 			return;
 		rcu_for_each_leaf_node(rnp) {
-			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
+			mask = READ_ONCE(rnp->expmask);
+			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
 				rdp = per_cpu_ptr(&rcu_data, cpu);
 				if (rdp->rcu_forced_tick_exp)
 					continue;
@@ -656,7 +657,7 @@ static void rcu_exp_handler(void *unused)
 	 */
 	if (!depth) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-		    rcu_dynticks_curr_cpu_in_eqs()) {
+		    rcu_is_cpu_rrupt_from_idle()) {
 			rcu_report_exp_rdp(rdp);
 		} else {
 			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
...
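The first hunk above snapshots rnp->expmask once with READ_ONCE() into a local variable instead of letting the iteration macro reload a concurrently-cleared field on every pass, so the whole scan works against one consistent view of the mask. Sketch of the pattern, with a simplified READ_ONCE() stand-in:

/* Snapshot-then-iterate: one marked load, then walk the stable local
 * copy rather than re-reading a field other CPUs may be clearing. */
#include <stdio.h>

#define READ_ONCE(x) (*(volatile const __typeof__(x) *)&(x))

static unsigned long expmask;	/* updated concurrently elsewhere */

static void scan_cpus(void)
{
	unsigned long mask = READ_ONCE(expmask);	/* single marked load */

	for (int cpu = 0; cpu < 8 * (int)sizeof(mask); cpu++)
		if (mask & (1UL << cpu))
			printf("CPU %d still needs a quiescent state\n", cpu);
}

int main(void)
{
	expmask = 0x5;	/* CPUs 0 and 2 */
	scan_cpus();
	return 0;
}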
@@ -1169,7 +1169,7 @@ void __init rcu_init_nohz(void)
 	struct rcu_data *rdp;
 
 #if defined(CONFIG_NO_HZ_FULL)
-	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
 		need_rcu_nocb_mask = true;
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
@@ -1226,6 +1226,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 	raw_spin_lock_init(&rdp->nocb_gp_lock);
 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
 	rcu_cblist_init(&rdp->nocb_bypass);
+	mutex_init(&rdp->nocb_gp_kthread_mutex);
 }
 
 /*
@@ -1238,6 +1239,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_data *rdp_gp;
 	struct task_struct *t;
+	struct sched_param sp;
 
 	if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
 		return;
@@ -1247,20 +1249,30 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 		return;
 
 	/* If we didn't spawn the GP kthread first, reorganize! */
+	sp.sched_priority = kthread_prio;
 	rdp_gp = rdp->nocb_gp_rdp;
+	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
 	if (!rdp_gp->nocb_gp_kthread) {
 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
 				"rcuog/%d", rdp_gp->cpu);
-		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
+			mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 			return;
+		}
 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
+		if (kthread_prio)
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	}
+	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
 	/* Spawn the kthread for this CPU. */
 	t = kthread_run(rcu_nocb_cb_kthread, rdp,
 			"rcuo%c/%d", rcu_state.abbr, cpu);
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
 		return;
+
+	if (kthread_prio)
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+
 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
@@ -1348,7 +1360,7 @@ static void __init rcu_organize_nocb_kthreads(void)
  */
 void rcu_bind_current_to_nocb(void)
 {
-	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
 		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
 }
 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
...
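The nocb hunks serialize rcuog kthread creation behind the new nocb_gp_kthread_mutex, so two CPUs spawning their callback kthreads concurrently cannot both try to create the shared GP kthread, or observe it half-initialized. A userspace pthread analogue of the locked lazy-spawn shape (illustrative only; the kernel version also sets SCHED_FIFO priority under the same mutex):

/* First caller under the mutex creates the shared worker; later
 * callers see it already exists and do nothing. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gp_kthread_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t gp_thread;
static int gp_thread_started;

static void *gp_worker(void *arg)
{
	(void)arg;
	/* ... grace-period work would loop here ... */
	return NULL;
}

static void spawn_gp_worker_once(void)
{
	pthread_mutex_lock(&gp_kthread_mutex);
	if (!gp_thread_started &&
	    pthread_create(&gp_thread, NULL, gp_worker, NULL) == 0)
		gp_thread_started = 1;	/* publish only under the mutex */
	pthread_mutex_unlock(&gp_kthread_mutex);
}

int main(void)
{
	spawn_gp_worker_once();	/* e.g. CPU 0's spawn path */
	spawn_gp_worker_once();	/* e.g. CPU 1's path: no second thread */
	printf("started=%d\n", gp_thread_started);
	if (gp_thread_started)
		pthread_join(gp_thread, NULL);
	return 0;
}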
@@ -330,7 +330,7 @@ void rcu_note_context_switch(bool preempt)
 		 * then queue the task as required based on the states
 		 * of any ongoing and expedited grace periods.
 		 */
-		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
+		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 		trace_rcu_preempt_task(rcu_state.name,
 				       t->pid,
@@ -556,16 +556,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
-		/* Unboost if we were boosted. */
-		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
-
 		/*
 		 * If this was the last task on the expedited lists,
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
 			rcu_report_exp_rnp(rnp, true);
+
+		/* Unboost if we were boosted. */
+		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
+			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -773,7 +773,6 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	int cpu;
 	int i;
 	struct list_head *lhp;
-	bool onl;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp1;
 
@@ -797,9 +796,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	pr_cont("\n");
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
-			cpu, ".o"[onl],
+			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
 	}
@@ -996,12 +994,15 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
  */
 static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 #ifdef CONFIG_RCU_BOOST
 	struct sched_param sp;
 
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+
+	WRITE_ONCE(rdp->rcuc_activity, jiffies);
 }
 
 #ifdef CONFIG_RCU_BOOST
@@ -1172,15 +1173,14 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	struct sched_param sp;
 	struct task_struct *t;
 
+	mutex_lock(&rnp->boost_kthread_mutex);
 	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
-		return;
-
-	rcu_state.boost = 1;
+		goto out;
 
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
 			   "rcub/%d", rnp_index);
 	if (WARN_ON_ONCE(IS_ERR(t)))
-		return;
+		goto out;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
@@ -1188,6 +1188,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+
+ out:
+	mutex_unlock(&rnp->boost_kthread_mutex);
 }
 
 /*
@@ -1210,14 +1213,16 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		return;
 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
+	mutex_lock(&rnp->boost_kthread_mutex);
 	for_each_leaf_node_possible_cpu(rnp, cpu)
 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
 		    cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
 	cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU));
-	if (cpumask_weight(cm) == 0)
+	if (cpumask_empty(cm))
 		cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU));
 	set_cpus_allowed_ptr(t, cm);
+	mutex_unlock(&rnp->boost_kthread_mutex);
 	free_cpumask_var(cm);
 }
...
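Several hunks above also replace cpumask_weight(mask) == 0 tests with cpumask_empty(mask). Both are correct, but computing the weight counts every bit of the mask, while an emptiness test can stop at the first nonzero word. A simplified illustration with plain arrays standing in for the kernel helpers:

/* Emptiness check vs. full popcount over a multi-word bitmask. */
#include <stdbool.h>
#include <stdio.h>

#define WORDS 16

static int mask_weight(const unsigned long *m)	/* counts every bit */
{
	int w = 0;

	for (int i = 0; i < WORDS; i++)
		w += __builtin_popcountl(m[i]);
	return w;
}

static bool mask_empty(const unsigned long *m)	/* early exit */
{
	for (int i = 0; i < WORDS; i++)
		if (m[i])
			return false;
	return true;
}

int main(void)
{
	unsigned long m[WORDS] = { 1UL };	/* bit 0 set */

	printf("weight=%d empty=%d\n", mask_weight(m), mask_empty(m));
	return 0;
}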
@@ -379,6 +379,15 @@ static bool rcu_is_gp_kthread_starving(unsigned long *jp)
 	return j > 2 * HZ;
 }
 
+static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
+{
+	unsigned long j = jiffies - READ_ONCE(rdp->rcuc_activity);
+
+	if (jp)
+		*jp = j;
+	return j > 2 * HZ;
+}
+
 /*
  * Print out diagnostic information for the specified stalled CPU.
  *
@@ -430,6 +439,29 @@ static void print_cpu_stall_info(int cpu)
 	       falsepositive ? " (false positive?)" : "");
 }
 
+static void rcuc_kthread_dump(struct rcu_data *rdp)
+{
+	int cpu;
+	unsigned long j;
+	struct task_struct *rcuc;
+
+	rcuc = rdp->rcu_cpu_kthread_task;
+	if (!rcuc)
+		return;
+
+	cpu = task_cpu(rcuc);
+	if (cpu_is_offline(cpu) || idle_cpu(cpu))
+		return;
+
+	if (!rcu_is_rcuc_kthread_starving(rdp, &j))
+		return;
+
+	pr_err("%s kthread starved for %ld jiffies\n", rcuc->comm, j);
+	sched_show_task(rcuc);
+	if (!trigger_single_cpu_backtrace(cpu))
+		dump_cpu_task(cpu);
+}
+
 /* Complain about starvation of grace-period kthread. */
 static void rcu_check_gp_kthread_starvation(void)
 {
@@ -601,6 +633,9 @@ static void print_cpu_stall(unsigned long gps)
 	rcu_check_gp_kthread_expired_fqs_timer();
 	rcu_check_gp_kthread_starvation();
 
+	if (!use_softirq)
+		rcuc_kthread_dump(rdp);
+
 	rcu_dump_cpu_stacks();
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
...
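The stall-warning additions pair a heartbeat with a watchdog: rcu_cpu_kthread_setup() stamps ->rcuc_activity with jiffies whenever the rcuc kthread runs, and rcu_is_rcuc_kthread_starving() flags starvation once that stamp is more than 2*HZ old. The same idiom in a tiny userspace sketch, with time(NULL) standing in for jiffies:

/* Heartbeat/watchdog sketch: the worker stamps a timestamp when it
 * runs; the watchdog reports starvation once the stamp is older
 * than a threshold (the kernel uses 2 * HZ jiffies). */
#include <stdio.h>
#include <time.h>

#define STARVE_THRESHOLD 2	/* seconds, standing in for 2 * HZ */

static time_t rcuc_activity;

static void worker_ran(void)		/* called from the worker */
{
	rcuc_activity = time(NULL);
}

static int worker_starving(time_t *age)	/* called from the watchdog */
{
	time_t j = time(NULL) - rcuc_activity;

	if (age)
		*age = j;
	return j > STARVE_THRESHOLD;
}

int main(void)
{
	time_t age;

	worker_ran();
	printf("starving=%d (age=%ld)\n", worker_starving(&age), (long)age);
	return 0;
}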
@@ -407,6 +407,13 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 }
 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
 
+void finish_rcuwait(struct rcuwait *w)
+{
+	rcu_assign_pointer(w->task, NULL);
+	__set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL_GPL(finish_rcuwait);
+
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 void init_rcu_head(struct rcu_head *head)
 {
...
@@ -911,7 +911,7 @@ void torture_kthread_stopping(char *title)
 {
 	char buf[128];
 
-	snprintf(buf, sizeof(buf), "Stopping %s", title);
+	snprintf(buf, sizeof(buf), "%s is stopping", title);
 	VERBOSE_TOROUT_STRING(buf);
 	while (!kthread_should_stop()) {
 		torture_shutdown_absorb(title);
@@ -931,12 +931,14 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
 	int ret = 0;
 
 	VERBOSE_TOROUT_STRING(m);
-	*tp = kthread_run(fn, arg, "%s", s);
+	*tp = kthread_create(fn, arg, "%s", s);
 	if (IS_ERR(*tp)) {
 		ret = PTR_ERR(*tp);
 		TOROUT_ERRSTRING(f);
 		*tp = NULL;
+		return ret;
 	}
+	wake_up_process(*tp);  // Process is sleeping, so ordering provided.
 	torture_shuffle_task_register(*tp);
 	return ret;
 }
...
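The torture.c change splits kthread_run() into kthread_create() plus a later wake_up_process(): the new thread is created asleep, torture_shuffle_task_register() runs while it still cannot execute, and only then is it woken, closing the window in which a running kthread could be observed unregistered. A pthread analogue using a condition-variable gate to hold the thread until registration is done (illustrative only; kernel kthreads natively start sleeping, so no gate is needed there):

/* Gate a freshly created thread until setup/registration completes.
 * Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gate_cond = PTHREAD_COND_INITIALIZER;
static int gate_open;

static void *torture_fn(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&gate_lock);		/* "asleep", like a */
	while (!gate_open)			/* just-created kthread */
		pthread_cond_wait(&gate_cond, &gate_lock);
	pthread_mutex_unlock(&gate_lock);
	printf("thread running after registration\n");
	return NULL;
}

int main(void)
{
	pthread_t tp;

	if (pthread_create(&tp, NULL, torture_fn, NULL) != 0)
		return 1;
	printf("registering thread before it runs\n");	/* register here */
	pthread_mutex_lock(&gate_lock);
	gate_open = 1;					/* wake_up_process() */
	pthread_cond_broadcast(&gate_cond);
	pthread_mutex_unlock(&gate_lock);
	pthread_join(tp, NULL);
	return 0;
}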
@@ -10,7 +10,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@kernel.org>
 
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
+egrep 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
 grep -v 'ODEBUG: ' |
 grep -v 'This means that this is a DEBUG kernel and it is' |
 grep -v 'Warning: unable to open an initial console' |
...
@@ -47,8 +47,8 @@ else
 	exit 1
 fi
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 dryrun=
...
@@ -49,8 +49,8 @@ fi
 mkdir $resdir/$ds
 echo Results directory: $resdir/$ds
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 echo Using all `identify_qemu_vcpus` CPUs.
...
@@ -22,8 +22,8 @@ T=${TMPDIR-/tmp}/kvm-end-run-stats.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 default_starttime="`get_starttime`"
 starttime="${2-default_starttime}"
...
@@ -30,10 +30,16 @@ editor=${EDITOR-vi}
 files=
 for i in ${rundir}/*/Make.out
 do
+	scenariodir="`dirname $i`"
+	scenariobasedir="`echo ${scenariodir} | sed -e 's/\.[0-9]*$//'`"
 	if egrep -q "error:|warning:|^ld: .*undefined reference to" < $i
 	then
 		egrep "error:|warning:|^ld: .*undefined reference to" < $i > $i.diags
 		files="$files $i.diags $i"
+	elif ! test -f ${scenariobasedir}/vmlinux
+	then
+		echo No ${scenariobasedir}/vmlinux file > $i.diags
+		files="$files $i.diags $i"
 	fi
 done
 if test -n "$files"
...
@@ -25,7 +25,7 @@ stopstate="`grep 'End-test grace-period state: g' $i/console.log 2> /dev/null |
 	    tail -1 | sed -e 's/^\[[ 0-9.]*] //' |
 	    awk '{ print \"[\" $1 \" \" $5 \" \" $6 \" \" $7 \"]\"; }' |
 	    tr -d '\012\015'`"
-fwdprog="`grep 'rcu_torture_fwd_prog n_max_cbs: ' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k3nr | head -1 | awk '{ print $2 " " $3 }'`"
+fwdprog="`grep 'rcu_torture_fwd_prog n_max_cbs: ' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k3nr | head -1 | awk '{ print $2 " " $3 }' | tr -d '\015'`"
 if test -z "$ngps"
 then
 	echo "$configfile ------- " $stopstate
...
@@ -19,8 +19,8 @@ then
 	exit 1
 fi
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 starttime="`get_starttime`"
@@ -108,8 +108,8 @@ else
 		cat $T/kvm-again.sh.out | tee -a "$oldrun/remote-log"
 		exit 2
 	fi
-	cp -a "$rundir" "$KVM/res/"
-	oldrun="$KVM/res/$ds"
+	cp -a "$rundir" "$RCUTORTURE/res/"
+	oldrun="$RCUTORTURE/res/$ds"
 fi
 echo | tee -a "$oldrun/remote-log"
 echo " ----" kvm-again.sh output: "(`date`)" | tee -a "$oldrun/remote-log"
@@ -155,18 +155,23 @@ do
 	echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log"
 	cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
 	ret=$?
-	if test "$ret" -ne 0
-	then
-		echo Unable to download $T/binres.tgz to system $i, waiting and then retrying. | tee -a "$oldrun/remote-log"
+	tries=0
+	while test "$ret" -ne 0
+	do
+		echo Unable to download $T/binres.tgz to system $i, waiting and then retrying.  $tries prior retries. | tee -a "$oldrun/remote-log"
 		sleep 60
 		cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
 		ret=$?
 		if test "$ret" -ne 0
 		then
-			echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log"
-			exit 10
+			if test "$tries" > 5
+			then
+				echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log"
+				exit 10
+			fi
 		fi
-	fi
+		tries=$((tries+1))
+	done
 done
 
 # Function to check for presence of a file on the specified system.
...
@@ -25,15 +25,15 @@ LANG=en_US.UTF-8; export LANG
 dur=$((30*60))
 dryrun=""
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
 TORTURE_DEFCONFIG=defconfig
 TORTURE_BOOT_IMAGE=""
 TORTURE_BUILDONLY=
-TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
+TORTURE_INITRD="$RCUTORTURE/initrd"; export TORTURE_INITRD
 TORTURE_KCONFIG_ARG=""
 TORTURE_KCONFIG_GDB_ARG=""
 TORTURE_BOOT_GDB_ARG=""
@@ -262,7 +262,7 @@ else
 	exit 1
 fi
 
-CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
+CONFIGFRAG=${RCUTORTURE}/configs/${TORTURE_SUITE}; export CONFIGFRAG
 
 defaultconfigs="`tr '\012' ' ' < $CONFIGFRAG/CFLIST`"
 if test -z "$configs"
@@ -272,7 +272,7 @@ fi
 
 if test -z "$resdir"
 then
-	resdir=$KVM/res
+	resdir=$RCUTORTURE/res
 fi
 
 # Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
@@ -280,7 +280,7 @@ configs_derep=
 for CF in $configs
 do
 	case $CF in
-	[0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**)
+	[0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**|[0-9][0-9][0-9][0-9]\**)
 		config_reps=`echo $CF | sed -e 's/\*.*$//'`
 		CF1=`echo $CF | sed -e 's/^[^*]*\*//'`
 		;;
@@ -386,7 +386,7 @@ END {
 # Generate a script to execute the tests in appropriate batches.
 cat << ___EOF___ > $T/script
 CONFIGFRAG="$CONFIGFRAG"; export CONFIGFRAG
-KVM="$KVM"; export KVM
+RCUTORTURE="$RCUTORTURE"; export RCUTORTURE
 PATH="$PATH"; export PATH
 TORTURE_ALLOTED_CPUS="$TORTURE_ALLOTED_CPUS"; export TORTURE_ALLOTED_CPUS
 TORTURE_BOOT_IMAGE="$TORTURE_BOOT_IMAGE"; export TORTURE_BOOT_IMAGE
@@ -569,7 +569,7 @@ ___EOF___
 awk < $T/cfgcpu.pack \
 	-v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
 	-v CONFIGDIR="$CONFIGFRAG/" \
-	-v KVM="$KVM" \
+	-v RCUTORTURE="$RCUTORTURE" \
 	-v ncpus=$cpus \
 	-v jitter="$jitter" \
 	-v rd=$resdir/$ds/ \
...
@@ -138,6 +138,16 @@ then
 	then
 		summary="$summary Bugs: $n_bugs"
 	fi
+	n_kcsan=`egrep -c 'BUG: KCSAN: ' $file`
+	if test "$n_kcsan" -ne 0
+	then
+		if test "$n_bugs" = "$n_kcsan"
+		then
+			summary="$summary (all bugs kcsan)"
+		else
+			summary="$summary KCSAN: $n_kcsan"
+		fi
+	fi
 	n_calltrace=`grep -c 'Call Trace:' $file`
 	if test "$n_calltrace" -ne 0
 	then
...
@@ -13,8 +13,8 @@
 scriptname=$0
 args="$*"
 
-KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
-PATH=${KVM}/bin:$PATH; export PATH
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
 . functions.sh
 
 TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
@@ -37,7 +37,7 @@ configs_scftorture=
 kcsan_kmake_args=
 
 # Default compression, duration, and apportionment.
-compress_kasan_vmlinux="`identify_qemu_vcpus`"
+compress_concurrency="`identify_qemu_vcpus`"
 duration_base=10
 duration_rcutorture_frac=7
 duration_locktorture_frac=1
@@ -67,12 +67,12 @@ function doyesno () {
 usage () {
 	echo "Usage: $scriptname optional arguments:"
-	echo "       --compress-kasan-vmlinux concurrency"
+	echo "       --compress-concurrency concurrency"
 	echo "       --configs-rcutorture \"config-file list w/ repeat factor (3*TINY01)\""
 	echo "       --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\""
 	echo "       --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
-	echo "       --doall"
-	echo "       --doallmodconfig / --do-no-allmodconfig"
+	echo "       --do-all"
+	echo "       --do-allmodconfig / --do-no-allmodconfig"
 	echo "       --do-clocksourcewd / --do-no-clocksourcewd"
 	echo "       --do-kasan / --do-no-kasan"
 	echo "       --do-kcsan / --do-no-kcsan"
@@ -91,9 +91,9 @@ usage () {
 while test $# -gt 0
 do
 	case "$1" in
-	--compress-kasan-vmlinux)
-		checkarg --compress-kasan-vmlinux "(concurrency level)" $# "$2" '^[0-9][0-9]*$' '^error'
-		compress_kasan_vmlinux=$2
+	--compress-concurrency)
+		checkarg --compress-concurrency "(concurrency level)" $# "$2" '^[0-9][0-9]*$' '^error'
+		compress_concurrency=$2
 		shift
 		;;
 	--config-rcutorture|--configs-rcutorture)
@@ -414,8 +414,14 @@ nfailures=0
 echo FAILURES: | tee -a $T/log
 if test -s "$T/failures"
 then
-	cat "$T/failures" | tee -a $T/log
+	awk < "$T/failures" -v sq="'" '{ print "echo " sq $0 sq; print "sed -e " sq "1,/^ --- .* Test summary:$/d" sq " " $2 "/log | grep Summary: | sed -e " sq "s/^[^S]*/  /" sq; }' | sh | tee -a $T/log | tee "$T/failuresum"
 	nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
+	grep "^  Summary: " "$T/failuresum" |
+		grep -v '^  Summary: Bugs: [0-9]* (all bugs kcsan)$' > "$T/nonkcsan"
+	if test -s "$T/nonkcsan"
+	then
+		nonkcsanbug="yes"
+	fi
 	ret=2
 fi
 if test "$do_kcsan" = "yes"
@@ -424,12 +430,16 @@ then
 fi
 echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
 echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
+if test -z "$nonkcsanbug" && test -s "$T/failuresum"
+then
+	echo "  All bugs were KCSAN failures."
+fi
 tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
-if test -n "$tdir" && test $compress_kasan_vmlinux -gt 0
+if test -n "$tdir" && test $compress_concurrency -gt 0
 then
 	# KASAN vmlinux files can approach 1GB in size, so compress them.
-	echo Looking for KASAN files to compress: `date` > "$tdir/log-xz" 2>&1
-	find "$tdir" -type d -name '*-kasan' -print > $T/xz-todo
+	echo Looking for K[AC]SAN files to compress: `date` > "$tdir/log-xz" 2>&1
+	find "$tdir" -type d -name '*-k[ac]san' -print > $T/xz-todo
 	ncompresses=0
 	batchno=1
 	if test -s $T/xz-todo
@@ -447,7 +457,7 @@ then
 		do
 			xz "$j" >> "$tdir/log-xz" 2>&1 &
 			ncompresses=$((ncompresses+1))
-			if test $ncompresses -ge $compress_kasan_vmlinux
+			if test $ncompresses -ge $compress_concurrency
 			then
 				echo Waiting for batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee -a $T/log
 				wait
...
 CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=3
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
...
 rcutorture.torture_type=srcu
+rcutorture.fwd_progress=3
...
 rcutorture.torture_type=srcud
 rcupdate.rcu_self_test=1
+rcutorture.fwd_progress=3
+srcutree.big_cpu_lim=5
 CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=5
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
...