tree.c 128 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
15 16
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
17 18 19 20 21 22 23 24 25 26 27
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
28
 *	Documentation/RCU
29 30 31 32 33 34
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
35
#include <linux/rcupdate_wait.h>
36 37
#include <linux/interrupt.h>
#include <linux/sched.h>
38
#include <linux/sched/debug.h>
39
#include <linux/nmi.h>
40
#include <linux/atomic.h>
41
#include <linux/bitops.h>
42
#include <linux/export.h>
43 44 45 46 47 48 49
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
50
#include <linux/kernel_stat.h>
51 52
#include <linux/wait.h>
#include <linux/kthread.h>
53
#include <uapi/linux/sched/types.h>
54
#include <linux/prefetch.h>
55 56
#include <linux/delay.h>
#include <linux/stop_machine.h>
57
#include <linux/random.h>
58
#include <linux/trace_events.h>
59
#include <linux/suspend.h>
60
#include <linux/ftrace.h>
61

62
#include "tree.h"
63
#include "rcu.h"
64

65 66 67 68 69
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

70 71
/* Data structures. */

72 73 74 75 76 77 78 79
/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
80 81
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
82
static char sname##_varname[] = #sname; \
83 84 85 86 87 88 89 90 91
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
92
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
93
struct rcu_state sname##_state = { \
94
	.level = { &sname##_state.node[0] }, \
95
	.rda = &sname##_data, \
96
	.call = cr, \
97
	.gp_state = RCU_GP_IDLE, \
Paul E. McKenney's avatar
Paul E. McKenney committed
98 99
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
100
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
101
	.name = RCU_STATE_NAME(sname), \
102
	.abbr = sabbr, \
103
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
104
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
105
}
106

107 108
RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
109

110
static struct rcu_state *const rcu_state_p;
111
LIST_HEAD(rcu_struct_flavors);
112

113 114 115
/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
116 117 118
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
119 120
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
121
module_param(rcu_fanout_leaf, int, 0444);
122
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
123
/* Number of rcu_nodes at specified level. */
124
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
125
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
126 127
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
128

129
/*
130 131 132 133
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
134
 * optimize synchronize_rcu() to a simple barrier().  When this variable
135 136 137 138 139
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
140
 */
141 142 143
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

144 145 146 147 148 149 150 151 152 153 154 155 156 157
/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

158 159
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
Thomas Gleixner's avatar
Thomas Gleixner committed
160
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
161 162
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
163 164
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
165
static void sync_sched_exp_online_cleanup(int cpu);
166

167
/* rcuc/rcub kthread realtime priority */
168
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
169 170
module_param(kthread_prio, int, 0644);

171
/* Delay in jiffies for grace-period initialization delays, debug only. */
172

173 174 175 176 177 178
static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
179

180 181
/*
 * Number of grace periods between delays, normalized by the duration of
182
 * the delay.  The longer the delay, the more the grace periods between
183 184 185 186 187 188 189
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
190

191 192 193 194 195 196 197 198 199 200 201 202
/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

203 204 205 206 207 208 209 210
/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
211
	return READ_ONCE(rnp->qsmaskinitnext);
212 213
}

214
/*
215
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
216 217 218 219 220
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
221
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
222 223
}

224
/*
225
 * Note a quiescent state.  Because we do not need to know
226
 * how many quiescent states passed, just if there was at least
227
 * one since the start of the grace period, this just sets a flag.
228
 * The caller must have disabled preemption.
229
 */
230
void rcu_sched_qs(void)
231
{
232
	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
233 234 235 236 237 238 239 240
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
241 242 243
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
244 245
}

246
void rcu_bh_qs(void)
247
{
248
	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
249
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
250 251 252
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
253
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
254
	}
255
}
256

257 258 259 260 261 262 263 264 265
/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif
266 267

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
268
	.dynticks_nesting = 1,
269
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
270
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
271 272
};

273 274 275 276 277 278 279
/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
280
	int seq;
281 282

	/*
283
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
284 285 286
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
287 288 289 290 291 292 293
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	/* Better be in an extended quiescent state! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
294 295 296 297 298 299 300 301 302
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
303
	int seq;
304 305

	/*
306
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
307 308 309
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
310 311 312 313 314 315 316 317 318
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
		/* Prefer duplicate flushes to losing a flush. */
		rcu_eqs_special_exit();
	}
319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

335
	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
336
		return;
337
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
338 339
}

340 341 342 343 344 345 346 347 348
/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

349
	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
350 351
}

352 353 354 355
/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
356
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
357 358 359
{
	int snap = atomic_add_return(0, &rdtp->dynticks);

360
	return snap & ~RCU_DYNTICK_CTRL_MASK;
361 362
}

363 364 365 366 367 368
/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
369
	return !(snap & RCU_DYNTICK_CTRL_CTR);
370 371 372 373 374 375 376 377 378 379 380 381
}

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
{
	return snap != rcu_dynticks_snap(rdtp);
}

382 383 384 385 386 387 388
/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle-CPU quiescent state.
 */
static void rcu_dynticks_momentary_idle(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
389 390
	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
					&rdtp->dynticks);
391 392

	/* It is illegal to call this from idle state. */
393
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
394 395
}

396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415
/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	do {
		old = atomic_read(&rdtp->dynticks);
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
	return true;
416
}
417

418 419 420 421 422 423 424
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
425
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
426 427
 *
 * The caller must have disabled interrupts.
428 429 430
 */
static void rcu_momentary_dyntick_idle(void)
{
431 432
	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
	rcu_dynticks_momentary_idle();
433 434
}

435 436 437
/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
438
 * The caller must have disabled interrupts.
439
 */
440
void rcu_note_context_switch(bool preempt)
441
{
442
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
443
	trace_rcu_utilization(TPS("Start context switch"));
444
	rcu_sched_qs();
445
	rcu_preempt_note_context_switch(preempt);
446 447 448 449
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
450
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
451
		rcu_momentary_dyntick_idle();
452
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
453 454
	if (!preempt)
		rcu_note_voluntary_context_switch_lite(current);
455
out:
456
	trace_rcu_utilization(TPS("End context switch"));
457
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
458
}
459
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
460

461
/*
462
 * Register a quiescent state for all RCU flavors.  If there is an
463 464
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
465
 * RCU flavors in desperate need of a quiescent state, which will normally
466 467
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
468 469 470 471 472
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 *
473 474 475
 */
void rcu_all_qs(void)
{
476 477
	unsigned long flags;

478 479 480 481 482 483 484 485 486
	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
487
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
488
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
489
		local_irq_save(flags);
490
		rcu_momentary_dyntick_idle();
491 492
		local_irq_restore(flags);
	}
493
	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
494
		rcu_sched_qs();
495
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
496
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
497
	preempt_enable();
498 499 500
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

501 502 503 504 505 506
#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;
507

508 509 510
module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
511

512 513
static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
514
static bool rcu_kick_kthreads;
515 516 517

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
518
module_param(rcu_kick_kthreads, bool, 0644);
519

520 521 522 523
/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
524 525
static ulong jiffies_till_sched_qs = HZ / 10;
module_param(jiffies_till_sched_qs, ulong, 0444);
526

527
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
528
static void force_quiescent_state(struct rcu_state *rsp);
529
static int rcu_pending(void);
530 531

/*
532
 * Return the number of RCU batches started thus far for debug & stats.
533
 */
534 535 536 537 538 539 540 541
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
542
 */
543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
569
 */
570
unsigned long rcu_batches_completed_sched(void)
571
{
572
	return rcu_sched_state.completed;
573
}
574
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
575 576

/*
577
 * Return the number of RCU BH batches completed thus far for debug & stats.
578
 */
579
unsigned long rcu_batches_completed_bh(void)
580 581 582 583 584
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state_p->expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats.  Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void)
{
	return rcu_sched_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

607 608 609 610 611
/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
612
	force_quiescent_state(rcu_state_p);
613 614 615
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

616 617 618 619 620
/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
621
	force_quiescent_state(&rcu_bh_state);
622 623 624
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

625 626 627 628 629 630 631 632 633
/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

634 635 636 637 638 639 640 641 642 643 644 645 646 647 648
/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

649 650 651 652 653 654 655 656 657 658 659 660 661 662
/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

663 664 665 666 667 668 669 670 671 672
/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
673
		rsp = rcu_state_p;
674 675 676 677 678 679 680 681 682 683
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
684
	if (rsp == NULL)
685
		return;
686 687 688
	*flags = READ_ONCE(rsp->gp_flags);
	*gpnum = READ_ONCE(rsp->gpnum);
	*completed = READ_ONCE(rsp->completed);
689 690 691
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

692 693 694 695 696 697 698 699 700 701 702
/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

703 704 705 706 707 708 709 710
/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

711
/*
712 713
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
714
 *
715 716 717
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
718
 */
719
static void rcu_eqs_enter(bool user)
720
{
721 722
	struct rcu_state *rsp;
	struct rcu_data *rdp;
723
	struct rcu_dynticks *rdtp;
724

725 726 727 728 729 730 731
	rdtp = this_cpu_ptr(&rcu_dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (rdtp->dynticks_nesting != 1) {
		rdtp->dynticks_nesting--;
		return;
732
	}
733

734
	lockdep_assert_irqs_disabled();
735
	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
736
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
737 738 739 740
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
741
	rcu_prepare_for_idle();
742
	WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
743
	rcu_dynticks_eqs_enter();
744
	rcu_dynticks_task_enter();
745
}
746 747 748 749 750 751 752 753 754

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
755 756
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
757 758 759
 */
void rcu_idle_enter(void)
{
760
	lockdep_assert_irqs_disabled();
761
	rcu_eqs_enter(false);
762
}
763

764
#ifdef CONFIG_NO_HZ_FULL
765 766 767 768 769 770 771
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
772 773 774
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
775 776 777
 */
void rcu_user_enter(void)
{
778
	lockdep_assert_irqs_disabled();
779
	rcu_eqs_enter(true);
780
}
781
#endif /* CONFIG_NO_HZ_FULL */
782

783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
811
		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
812 813 814 815 816 817
		WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
			   rdtp->dynticks_nmi_nesting - 2);
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
818
	trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
819 820 821 822
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
	rcu_dynticks_eqs_enter();
}

823 824 825 826 827
/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
828
 * sections can occur.  The caller must have disabled interrupts.
829
 *
830 831
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
832 833
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
834 835 836 837
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
838 839 840
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
841
 */
842
void rcu_irq_exit(void)
843
{
844
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
845

846
	lockdep_assert_irqs_disabled();
847 848 849 850 851
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_prepare_for_idle();
	rcu_nmi_exit();
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_enter();
852 853 854 855
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
856 857 858
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
859 860 861 862 863 864 865
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
866 867 868
	local_irq_restore(flags);
}

869 870 871
/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
872 873 874 875
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
876
 */
877
static void rcu_eqs_exit(bool user)
878 879
{
	struct rcu_dynticks *rdtp;
880
	long oldval;
881

882
	lockdep_assert_irqs_disabled();
883
	rdtp = this_cpu_ptr(&rcu_dynticks);
884
	oldval = rdtp->dynticks_nesting;
885
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
886 887
	if (oldval) {
		rdtp->dynticks_nesting++;
888
		return;
889
	}
890 891 892 893
	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
894
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
895 896
	WRITE_ONCE(rdtp->dynticks_nesting, 1);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
897
}
898 899 900 901 902 903 904

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
905 906
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
907 908 909
 */
void rcu_idle_exit(void)
{
910 911 912
	unsigned long flags;

	local_irq_save(flags);
913
	rcu_eqs_exit(false);
914
	local_irq_restore(flags);
915
}
916

917
#ifdef CONFIG_NO_HZ_FULL
918 919 920 921 922
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
923 924 925
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
926 927 928
 */
void rcu_user_exit(void)
{
929
	rcu_eqs_exit(1);
930
}
931
#endif /* CONFIG_NO_HZ_FULL */
932

933 934 935
/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
936 937 938 939 940
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
941 942 943
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
944 945 946
 */
void rcu_nmi_enter(void)
{
947
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
948
	long incby = 2;
949

950 951 952 953 954 955 956 957 958 959 960
	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
961
	if (rcu_dynticks_curr_cpu_in_eqs()) {
962
		rcu_dynticks_eqs_exit();
963 964
		incby = 1;
	}
965 966
	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdtp->dynticks_nmi_nesting,
967
			  rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
968 969
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdtp->dynticks_nmi_nesting + incby);
970
	barrier();
971 972 973
}

/**
974
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
975
 *
976 977
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
978
 * sections can occur.  The caller must have disabled interrupts.
979
 *
980
 * Note that the Linux kernel is fully capable of entering an interrupt
981 982 983 984 985 986
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
987 988 989 990
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
991 992 993
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
994
 */
995
void rcu_irq_enter(void)
996
{
997
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
998

999
	lockdep_assert_irqs_disabled();
1000 1001 1002 1003 1004
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_exit();
	rcu_nmi_enter();
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_cleanup_after_idle();
1005
}
1006

1007 1008
/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1009 1010 1011
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
1012 1013 1014 1015
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;
1016

1017 1018
	local_irq_save(flags);
	rcu_irq_enter();
1019 1020 1021
	local_irq_restore(flags);
}

1022 1023
/**
 * rcu_is_watching - see if RCU thinks that the current CPU is idle
1024
 *
1025 1026 1027
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is in its idle loop and is neither in an interrupt
1028
 * or NMI handler, return true.
1029
 */
1030
bool notrace rcu_is_watching(void)
1031
{
1032
	bool ret;
1033

1034
	preempt_disable_notrace();
1035
	ret = !rcu_dynticks_curr_cpu_in_eqs();
1036
	preempt_enable_notrace();
1037
	return ret;
1038
}
1039
EXPORT_SYMBOL_GPL(rcu_is_watching);
1040

1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058
/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
}

1059
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1060 1061 1062 1063 1064 1065 1066

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
1067 1068 1069 1070 1071 1072
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
1073 1074
 * the fact that a CPU enters the scheduler after completing the teardown
 * of the CPU.
1075
 *
1076 1077
 * This is also why RCU internally marks CPUs online during in the
 * preparation phase and offline after the CPU has been taken down.
1078 1079 1080 1081 1082 1083
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
1084 1085
	struct rcu_data *rdp;
	struct rcu_node *rnp;
1086 1087 1088
	bool ret;

	if (in_nmi())
Fengguang Wu's avatar
Fengguang Wu committed
1089
		return true;
1090
	preempt_disable();
1091
	rdp = this_cpu_ptr(&rcu_sched_data);
1092
	rnp = rdp->mynode;
1093
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1094 1095 1096 1097 1098 1099
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

1100
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1101

1102
/**
1103
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
1104
 *
1105 1106 1107
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
1108
 */
1109
static int rcu_is_cpu_rrupt_from_idle(void)
1110
{
1111 1112
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
1113 1114
}

1115 1116 1117 1118 1119 1120 1121 1122 1123
/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
1124
	raw_lockdep_assert_held_rcu_node(rnp);
1125 1126 1127 1128 1129 1130
	if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
		rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
}

1131 1132 1133
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
1134
 * is in dynticks idle mode, which is an extended quiescent state.
1135
 */
1136
static int dyntick_save_progress_counter(struct rcu_data *rdp)
1137
{
1138
	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
1139
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1140
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1141
		rcu_gpnum_ovf(rdp->mynode, rdp);
1142
		return 1;
1143
	}
1144
	return 0;
1145 1146
}

1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167
/*
 * Handler for the irq_work request posted when a grace period has
 * gone on for too long, but not yet long enough for an RCU CPU
 * stall warning.  Set state appropriately, but just complain if
 * there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gpnum = rnp->gpnum;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

1168 1169 1170 1171
/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through an dynticks
 * idle state since the last call to dyntick_save_progress_counter()
1172
 * for this same CPU, or by virtue of having been offline.
1173
 */
1174
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1175
{
1176
	unsigned long jtsq;
1177
	bool *rnhqp;
1178
	bool *ruqp;
1179
	struct rcu_node *rnp = rdp->mynode;
1180 1181 1182 1183 1184 1185 1186 1187 1188

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
1189
	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
1190
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1191
		rdp->dynticks_fqs++;
1192
		rcu_gpnum_ovf(rnp, rdp);
1193 1194 1195
		return 1;
	}

1196
	/*
1197 1198 1199 1200
	 * Has this CPU encountered a cond_resched_rcu_qs() since the
	 * beginning of the grace period?  For this to be the case,
	 * the CPU has to have noticed the current grace period.  This
	 * might not be the case for nohz_full CPUs looping in the kernel.
1201
	 */
1202
	jtsq = jiffies_till_sched_qs;
1203
	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
1204
	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
1205
	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
1206 1207
	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
1208
		rcu_gpnum_ovf(rnp, rdp);
1209
		return 1;
1210
	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
1211 1212
		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
		smp_store_release(ruqp, true);
1213 1214
	}

1215 1216
	/* Check for the CPU being offline. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
1217
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1218
		rdp->offline_fqs++;
1219
		rcu_gpnum_ovf(rnp, rdp);
1220 1221
		return 1;
	}
1222 1223

	/*
1224 1225 1226 1227 1228 1229
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
1230
	 * rcu_need_heavy_qs variable are safe.  Yes, setting of
1231 1232 1233 1234 1235 1236 1237 1238
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.  Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
1239
	 */
1240 1241 1242 1243 1244
	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
		WRITE_ONCE(*rnhqp, true);
1245 1246
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
1247
		rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
1248 1249
	}

1250
	/*
1251 1252 1253 1254
	 * If more than halfway to RCU CPU stall-warning time, do a
	 * resched_cpu() to try to loosen things up a bit.  Also check to
	 * see if the CPU is getting hammered with interrupts, but only
	 * once per grace period, just to keep the IPIs down to a dull roar.
1255
	 */
1256
	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
1257
		resched_cpu(rdp->cpu);
1258 1259 1260 1261 1262 1263 1264 1265 1266
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
		    (rnp->ffmask & rdp->grpmask)) {
			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gpnum = rnp->gpnum;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}
1267

1268
	return 0;
1269 1270 1271 1272
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
1273
	unsigned long j = jiffies;
1274
	unsigned long j1;
1275 1276 1277

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
1278
	j1 = rcu_jiffies_till_stall_check();
1279
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
1280
	rsp->jiffies_resched = j + j1 / 2;
1281
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
1282 1283
}

1284 1285 1286 1287 1288 1289 1290 1291 1292 1293
/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

1294 1295 1296 1297 1298 1299 1300 1301 1302
/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
1303
	gpa = READ_ONCE(rsp->gp_activity);
1304
	if (j - gpa > 2 * HZ) {
1305
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
1306
		       rsp->name, j - gpa,
1307
		       rsp->gpnum, rsp->completed,
1308 1309
		       rsp->gp_flags,
		       gp_state_getname(rsp->gp_state), rsp->gp_state,
1310 1311
		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
1312
		if (rsp->gp_kthread) {
1313
			pr_err("RCU grace-period kthread stack dump:\n");
1314
			sched_show_task(rsp->gp_kthread);
1315 1316
			wake_up_process(rsp->gp_kthread);
		}
1317
	}
1318 1319
}

1320
/*
1321 1322 1323 1324
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
1325 1326 1327 1328 1329 1330 1331 1332
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
1333
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1334 1335 1336
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
1337
					dump_cpu_task(cpu);
Boqun Feng's avatar
Boqun Feng committed
1338
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1339 1340 1341
	}
}

1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352
/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rsp->jiffies_kick_kthreads);
1353 1354
	if (time_after(jiffies, j) && rsp->gp_kthread &&
	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
1355
		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
1356
		rcu_ftrace_dump(DUMP_ALL);
1357 1358 1359 1360 1361
		wake_up_process(rsp->gp_kthread);
		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
	}
}

1362 1363 1364 1365 1366 1367
static inline void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

1368
static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1369 1370 1371 1372
{
	int cpu;
	long delta;
	unsigned long flags;
1373 1374
	unsigned long gpa;
	unsigned long j;
1375
	int ndetected = 0;
1376
	struct rcu_node *rnp = rcu_get_root(rsp);
1377
	long totqlen = 0;
1378

1379 1380 1381 1382 1383
	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

1384 1385
	/* Only let one CPU complain about others per time interval. */

1386
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1387
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
1388
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
Boqun Feng's avatar
Boqun Feng committed
1389
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1390 1391
		return;
	}
1392 1393
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
Boqun Feng's avatar
Boqun Feng committed
1394
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1395

1396 1397 1398 1399 1400
	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
1401
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
1402
	       rsp->name);
1403
	print_cpu_stall_info_begin();
1404
	rcu_for_each_leaf_node(rsp, rnp) {
1405
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1406
		ndetected += rcu_print_task_stall(rnp);
1407
		if (rnp->qsmask != 0) {
1408 1409 1410
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(rsp, cpu);
1411 1412 1413
					ndetected++;
				}
		}
Boqun Feng's avatar
Boqun Feng committed
1414
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1415
	}
1416 1417

	print_cpu_stall_info_end();
1418
	for_each_possible_cpu(cpu)
1419 1420
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
1421
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1422
	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
1423
	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
1424
	if (ndetected) {
1425
		rcu_dump_cpu_stacks(rsp);
1426 1427 1428

		/* Complain about tasks blocking the grace period. */
		rcu_print_detail_task_stall(rsp);
1429
	} else {
1430 1431
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
1432 1433 1434
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
1435
			gpa = READ_ONCE(rsp->gp_activity);
1436
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1437
			       rsp->name, j - gpa, j, gpa,
1438 1439
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
1440 1441 1442 1443
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
1444

1445 1446
	rcu_check_gp_kthread_starvation(rsp);

1447 1448
	panic_on_rcu_stall();

1449
	force_quiescent_state(rsp);  /* Kick them all. */
1450 1451 1452 1453
}

static void print_cpu_stall(struct rcu_state *rsp)
{
1454
	int cpu;
1455
	unsigned long flags;
1456
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1457
	struct rcu_node *rnp = rcu_get_root(rsp);
1458
	long totqlen = 0;
1459

1460 1461 1462 1463 1464
	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

1465 1466 1467 1468 1469
	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
1470
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1471
	print_cpu_stall_info_begin();
1472
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
1473
	print_cpu_stall_info(rsp, smp_processor_id());
1474
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
1475
	print_cpu_stall_info_end();
1476
	for_each_possible_cpu(cpu)
1477 1478
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
1479 1480 1481
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);
1482 1483 1484

	rcu_check_gp_kthread_starvation(rsp);

1485
	rcu_dump_cpu_stacks(rsp);
1486

1487
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1488 1489 1490
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
Boqun Feng's avatar
Boqun Feng committed
1491
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1492

1493 1494
	panic_on_rcu_stall();

1495 1496 1497 1498 1499 1500 1501 1502
	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
1503 1504 1505 1506
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
1507 1508 1509
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
1510 1511
	unsigned long j;
	unsigned long js;
1512 1513
	struct rcu_node *rnp;

1514 1515
	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress(rsp))
1516
		return;
1517
	rcu_stall_kick_kthreads(rsp);
1518
	j = jiffies;
1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed.  These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get an new value of
	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
1537
	gpnum = READ_ONCE(rsp->gpnum);
1538
	smp_rmb(); /* Pick up ->gpnum first... */
1539
	js = READ_ONCE(rsp->jiffies_stall);
1540
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1541
	gps = READ_ONCE(rsp->gp_start);
1542
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1543
	completed = READ_ONCE(rsp->completed);
1544 1545 1546 1547
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
1548
	rnp = rdp->mynode;
1549
	if (rcu_gp_in_progress(rsp) &&
1550
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
1551 1552 1553 1554

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

1555 1556
	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1557

1558
		/* They had a few time units to dump stack, so complain. */
1559
		print_other_cpu_stall(rsp, gpnum);
1560 1561 1562
	}
}

1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
1574 1575 1576
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
1577
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
1578 1579
}

1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591
/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
1592
	raw_lockdep_assert_held_rcu_node(rnp);
1593

1594 1595 1596 1597 1598 1599 1600 1601 1602 1603
	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618
	/*
	 * If the current rcu_node structure believes that RCU is
	 * idle, and if the rcu_state structure does not yet reflect
	 * the start of a new grace period, then the next grace period
	 * will suffice.  The memory barrier is needed to accurately
	 * sample the rsp->gpnum, and pairs with the second lock
	 * acquisition in rcu_gp_init(), which is augmented with
	 * smp_mb__after_unlock_lock() for this purpose.
	 */
	if (rnp->gpnum == rnp->completed) {
		smp_mb(); /* See above block comment. */
		if (READ_ONCE(rsp->gpnum) == rnp->completed)
			return rnp->completed + 1;
	}

1619 1620 1621 1622 1623 1624 1625
	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}

1626 1627 1628
/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long c, const char *s)
1629 1630 1631 1632 1633 1634 1635
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
1636
 * Start the specified grace period, as needed to handle newly arrived
1637
 * callbacks.  The required future grace periods are recorded in each
1638
 * rcu_node structure's ->need_future_gp[] field.  Returns true if there
1639
 * is reason to awaken the grace-period kthread.
1640
 *
1641 1642
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
1643
 */
1644 1645
static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long c)
1646
{
1647
	bool ret = false;
1648
	struct rcu_state *rsp = rdp->rsp;
1649
	struct rcu_node *rnp_root;
1650 1651

	/*
1652 1653 1654 1655 1656 1657 1658
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or has already started.  If there
	 * is already a grace period in progress in a non-leaf node, no
	 * recording is needed because the end of the grace period will
	 * scan the leaf rcu_node structures.  Note that rnp->lock must
	 * not be released.
1659
	 */
1660 1661 1662 1663 1664
	raw_lockdep_assert_held_rcu_node(rnp);
	trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
	for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
		if (rnp_root != rnp)
			raw_spin_lock_rcu_node(rnp_root);
1665 1666
		WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
					  need_future_gp_mask(), c));
1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678
		if (need_future_gp_element(rnp_root, c) ||
		    ULONG_CMP_GE(rnp_root->gpnum, c) ||
		    (rnp != rnp_root &&
		     rnp_root->gpnum != rnp_root->completed)) {
			trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
			goto unlock_out;
		}
		need_future_gp_element(rnp_root, c) = true;
		if (rnp_root != rnp && rnp_root->parent != NULL)
			raw_spin_unlock_rcu_node(rnp_root);
		if (!rnp_root->parent)
			break;  /* At root, and perhaps also leaf. */
1679 1680
	}

1681 1682 1683
	/* If GP already in progress, just leave, otherwise start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
		trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
1684 1685
		goto unlock_out;
	}
1686 1687 1688 1689 1690
	trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
	if (!rsp->gp_kthread) {
		trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
		goto unlock_out;
1691
	}
1692 1693
	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
1694 1695
unlock_out:
	if (rnp != rnp_root)
Boqun Feng's avatar
Boqun Feng committed
1696
		raw_spin_unlock_rcu_node(rnp_root);
1697
	return ret;
1698 1699 1700 1701
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
1702
 * whether any additional grace periods have been requested.
1703
 */
1704
static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1705
{
1706
	unsigned long c = rnp->completed;
1707
	bool needmore;
1708 1709
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

1710
	need_future_gp_element(rnp, c) = false;
1711
	needmore = need_any_future_gp(rnp);
1712 1713
	trace_rcu_this_gp(rnp, rdp, c,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1714 1715 1716
	return needmore;
}

1717 1718 1719 1720 1721 1722 1723 1724 1725 1726
/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't do a self-awaken, and don't bother awakening when there is
 * nothing for the grace-period kthread to do (as in several CPUs
 * raced to awaken, and we lost), and finally don't try to awaken
 * a kthread that has not yet been created.
 */
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
{
	if (current == rsp->gp_kthread ||
1727
	    !READ_ONCE(rsp->gp_flags) ||
1728 1729
	    !rsp->gp_kthread)
		return;
1730
	swake_up(&rsp->gp_wq);
1731 1732
}

1733 1734 1735 1736 1737 1738 1739
/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative, which can happen if callbacks get
 * assigned a ->completed number while RCU is idle, but with reference to
 * a non-root rcu_node structure.  This function is idempotent, so it does
1740 1741
 * not hurt to call it repeatedly.  Returns an flag saying that we should
 * awaken the RCU grace-period kthread.
1742 1743 1744
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1745
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1746 1747
			       struct rcu_data *rdp)
{
1748
	unsigned long c;
1749
	bool ret = false;
1750

1751
	raw_lockdep_assert_held_rcu_node(rnp);
1752

1753 1754
	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1755
		return false;
1756 1757

	/*
1758 1759 1760 1761 1762 1763 1764 1765
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.	The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
1766
	 */
1767 1768 1769
	c = rcu_cbs_completed(rsp, rnp);
	if (rcu_segcblist_accelerate(&rdp->cblist, c))
		ret = rcu_start_this_gp(rnp, rdp, c);
1770 1771

	/* Trace depending on how much we were able to accelerate. */
1772
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1773
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1774
	else
1775
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1776
	return ret;
1777 1778 1779 1780 1781 1782 1783 1784
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
1785
 * Returns true if the RCU grace-period kthread needs to be awakened.
1786 1787 1788
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1789
static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1790 1791
			    struct rcu_data *rdp)
{
1792
	raw_lockdep_assert_held_rcu_node(rnp);
1793

1794 1795
	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1796
		return false;
1797 1798 1799 1800 1801

	/*
	 * Find all callbacks whose ->completed numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
1802
	rcu_segcblist_advance(&rdp->cblist, rnp->completed);
1803 1804

	/* Classify any remaining callbacks. */
1805
	return rcu_accelerate_cbs(rsp, rnp, rdp);
1806 1807
}

1808
/*
1809 1810 1811
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
1812
 * Returns true if the grace-period kthread needs to be awakened.
1813
 */
1814 1815
static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
			      struct rcu_data *rdp)
1816
{
1817
	bool ret;
1818
	bool need_gp;
1819

1820
	raw_lockdep_assert_held_rcu_node(rnp);
1821

1822
	/* Handle the ends of any preceding grace periods first. */
1823
	if (rdp->completed == rnp->completed &&
1824
	    !unlikely(READ_ONCE(rdp->gpwrap))) {
1825

1826
		/* No grace period end, so just accelerate recent callbacks. */
1827
		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1828

1829 1830 1831
	} else {

		/* Advance callbacks. */
1832
		ret = rcu_advance_cbs(rsp, rnp, rdp);
1833 1834 1835

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
1836
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1837
	}
1838

1839
	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
1840 1841 1842 1843 1844 1845
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
1846
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1847 1848
		need_gp = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_gp;
1849
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
1850
		rdp->core_needs_qs = need_gp;
1851
		zero_cpu_stall_ticks(rdp);
1852
		WRITE_ONCE(rdp->gpwrap, false);
1853
		rcu_gpnum_ovf(rnp, rdp);
1854
	}
1855
	return ret;
1856 1857
}

1858
static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1859 1860
{
	unsigned long flags;
1861
	bool needwake;
1862 1863 1864 1865
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
1866 1867 1868
	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
	     rdp->completed == READ_ONCE(rnp->completed) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1869
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1870 1871 1872
		local_irq_restore(flags);
		return;
	}
1873
	needwake = __note_gp_changes(rsp, rnp, rdp);
Boqun Feng's avatar
Boqun Feng committed
1874
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1875 1876
	if (needwake)
		rcu_gp_kthread_wake(rsp);
1877 1878
}

1879 1880 1881 1882 1883 1884 1885
static void rcu_gp_slow(struct rcu_state *rsp, int delay)
{
	if (delay > 0 &&
	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_uninterruptible(delay);
}

1886
/*
1887
 * Initialize a new grace period.  Return false if no grace period required.
1888
 */
1889
static bool rcu_gp_init(struct rcu_state *rsp)
1890
{
1891
	unsigned long oldmask;
1892
	struct rcu_data *rdp;
1893
	struct rcu_node *rnp = rcu_get_root(rsp);
1894

1895
	WRITE_ONCE(rsp->gp_activity, jiffies);
1896
	raw_spin_lock_irq_rcu_node(rnp);
1897
	if (!READ_ONCE(rsp->gp_flags)) {
1898
		/* Spurious wakeup, tell caller to go back to sleep.  */
Boqun Feng's avatar
Boqun Feng committed
1899
		raw_spin_unlock_irq_rcu_node(rnp);
1900
		return false;
1901
	}
1902
	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
1903

1904 1905 1906 1907 1908
	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
Boqun Feng's avatar
Boqun Feng committed
1909
		raw_spin_unlock_irq_rcu_node(rnp);
1910
		return false;
1911 1912 1913
	}

	/* Advance to a new grace period and initialize state. */
1914
	record_gp_stall_check_time(rsp);
1915 1916
	/* Record GP times before starting GP, hence smp_store_release(). */
	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1917
	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
Boqun Feng's avatar
Boqun Feng committed
1918
	raw_spin_unlock_irq_rcu_node(rnp);
1919

1920 1921 1922 1923 1924 1925 1926
	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree.  Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
1927
		rcu_gp_slow(rsp, gp_preinit_delay);
1928
		raw_spin_lock_irq_rcu_node(rnp);
1929 1930 1931
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
Boqun Feng's avatar
Boqun Feng committed
1932
			raw_spin_unlock_irq_rcu_node(rnp);
1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) /* First online CPU for this rcu_node. */
				rcu_init_new_rnp(rnp);
			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
				rnp->wait_blkd_tasks = true;
			else /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
		 * checks for this, so just call it unconditionally).
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) ||
		     rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			rcu_cleanup_dead_rnp(rnp);
		}

Boqun Feng's avatar
Boqun Feng committed
1966
		raw_spin_unlock_irq_rcu_node(rnp);
1967
	}
1968 1969 1970 1971 1972 1973 1974 1975

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first order,
	 * starting from the root rcu_node structure, relying on the layout
	 * of the tree within the rsp->node[] array.  Note that other CPUs
	 * will access only the leaves of the hierarchy, thus seeing that no
	 * grace period is in progress, at least until the corresponding
1976
	 * leaf node has been initialized.
1977 1978 1979 1980 1981
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
1982
		rcu_gp_slow(rsp, gp_init_delay);
1983
		raw_spin_lock_irq_rcu_node(rnp);
1984
		rdp = this_cpu_ptr(rsp->rda);
1985 1986
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
1987
		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
1988
		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
1989
			WRITE_ONCE(rnp->completed, rsp->completed);
1990
		if (rnp == rdp->mynode)
1991
			(void)__note_gp_changes(rsp, rnp, rdp);
1992 1993 1994 1995
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
Boqun Feng's avatar
Boqun Feng committed
1996
		raw_spin_unlock_irq_rcu_node(rnp);
1997
		cond_resched_rcu_qs();
1998
		WRITE_ONCE(rsp->gp_activity, jiffies);
1999
	}
2000

2001
	return true;
2002
}
2003

2004
/*
2005 2006
 * Helper function for swait_event_idle() wakeup at force-quiescent-state
 * time.
2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023
 */
static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Someone like call_rcu() requested a force-quiescent-state scan. */
	*gfp = READ_ONCE(rsp->gp_flags);
	if (*gfp & RCU_GP_FLAG_FQS)
		return true;

	/* The current grace period has completed. */
	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
		return true;

	return false;
}

2024 2025 2026
/*
 * Do one round of quiescent-state forcing.
 */
2027
static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
2028 2029 2030
{
	struct rcu_node *rnp = rcu_get_root(rsp);

2031
	WRITE_ONCE(rsp->gp_activity, jiffies);
2032
	rsp->n_force_qs++;
2033
	if (first_time) {
2034
		/* Collect dyntick-idle snapshots. */
2035
		force_qs_rnp(rsp, dyntick_save_progress_counter);
2036 2037
	} else {
		/* Handle dyntick-idle and offline CPUs. */
2038
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
2039 2040
	}
	/* Clear flag to prevent immediate re-entry. */
2041
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2042
		raw_spin_lock_irq_rcu_node(rnp);
2043 2044
		WRITE_ONCE(rsp->gp_flags,
			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
Boqun Feng's avatar
Boqun Feng committed
2045
		raw_spin_unlock_irq_rcu_node(rnp);
2046 2047 2048
	}
}

2049 2050 2051
/*
 * Clean up after the old grace period.
 */
2052
static void rcu_gp_cleanup(struct rcu_state *rsp)
2053 2054
{
	unsigned long gp_duration;
2055
	bool needgp = false;
2056 2057
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);
2058
	struct swait_queue_head *sq;
2059

2060
	WRITE_ONCE(rsp->gp_activity, jiffies);
2061
	raw_spin_lock_irq_rcu_node(rnp);
2062 2063 2064
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;
2065

2066 2067 2068 2069 2070 2071 2072 2073
	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
Boqun Feng's avatar
Boqun Feng committed
2074
	raw_spin_unlock_irq_rcu_node(rnp);
2075

2076 2077 2078 2079 2080 2081 2082 2083 2084 2085
	/*
	 * Propagate new ->completed value to rcu_node structures so
	 * that other CPUs don't have to wait until the start of the next
	 * grace period to process their callbacks.  This also avoids
	 * some nasty RCU grace-period initialization races by forcing
	 * the end of the current grace period to be completely recorded in
	 * all of the rcu_node structures before the beginning of the next
	 * grace period is recorded in any of the rcu_node structures.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
2086
		raw_spin_lock_irq_rcu_node(rnp);
2087 2088
		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
		WARN_ON_ONCE(rnp->qsmask);
2089
		WRITE_ONCE(rnp->completed, rsp->gpnum);
2090 2091
		rdp = this_cpu_ptr(rsp->rda);
		if (rnp == rdp->mynode)
2092
			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
2093
		/* smp_mb() provided by prior unlock-lock pair. */
2094
		needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
2095
		sq = rcu_nocb_gp_get(rnp);
Boqun Feng's avatar
Boqun Feng committed
2096
		raw_spin_unlock_irq_rcu_node(rnp);
2097
		rcu_nocb_gp_cleanup(sq);
2098
		cond_resched_rcu_qs();
2099
		WRITE_ONCE(rsp->gp_activity, jiffies);
2100
		rcu_gp_slow(rsp, gp_cleanup_delay);
2101
	}
2102
	rnp = rcu_get_root(rsp);
2103
	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
2104

2105
	/* Declare grace period done. */
2106
	WRITE_ONCE(rsp->completed, rsp->gpnum);
2107
	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2108
	rsp->gp_state = RCU_GP_IDLE;
2109
	/* Check for GP requests since above loop. */
2110
	rdp = this_cpu_ptr(rsp->rda);
2111
	if (need_any_future_gp(rnp)) {
2112 2113
		trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
				  TPS("CleanupMore"));
2114 2115
		needgp = true;
	}
2116
	/* Advance CBs to reduce false positives below. */
2117
	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
2118
		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2119
		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
2120 2121
				       TPS("newreq"));
	}
2122
	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
Boqun Feng's avatar
Boqun Feng committed
2123
	raw_spin_unlock_irq_rcu_node(rnp);
2124 2125 2126 2127 2128 2129 2130
}

/*
 * Body of kthread that handles grace periods.
 */
static int __noreturn rcu_gp_kthread(void *arg)
{
2131
	bool first_gp_fqs;
2132
	int gf;
2133
	unsigned long j;
2134
	int ret;
2135 2136 2137
	struct rcu_state *rsp = arg;
	struct rcu_node *rnp = rcu_get_root(rsp);

2138 2139 2140 2141 2142 2143
	/* Check for early-boot work. */
	raw_spin_lock_irq_rcu_node(rnp);
	if (need_any_future_gp(rnp))
		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
	raw_spin_unlock_irq_rcu_node(rnp);

2144
	rcu_bind_gp_kthread();
2145 2146 2147 2148
	for (;;) {

		/* Handle grace-period start. */
		for (;;) {
2149
			trace_rcu_grace_period(rsp->name,
2150
					       READ_ONCE(rsp->gpnum),
2151
					       TPS("reqwait"));
2152
			rsp->gp_state = RCU_GP_WAIT_GPS;
2153 2154
			swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
						     RCU_GP_FLAG_INIT);
2155
			rsp->gp_state = RCU_GP_DONE_GPS;
2156
			/* Locking provides needed memory barrier. */
2157
			if (rcu_gp_init(rsp))
2158
				break;
2159
			cond_resched_rcu_qs();
2160
			WRITE_ONCE(rsp->gp_activity, jiffies);
2161
			WARN_ON(signal_pending(current));
2162
			trace_rcu_grace_period(rsp->name,
2163
					       READ_ONCE(rsp->gpnum),
2164
					       TPS("reqwaitsig"));
2165
		}
2166

2167
		/* Handle quiescent-state forcing. */
2168
		first_gp_fqs = true;
2169 2170 2171 2172 2173
		j = jiffies_till_first_fqs;
		if (j > HZ) {
			j = HZ;
			jiffies_till_first_fqs = HZ;
		}
2174
		ret = 0;
2175
		for (;;) {
2176
			if (!ret) {
2177
				rsp->jiffies_force_qs = jiffies + j;
2178 2179 2180
				WRITE_ONCE(rsp->jiffies_kick_kthreads,
					   jiffies + 3 * j);
			}
2181
			trace_rcu_grace_period(rsp->name,
2182
					       READ_ONCE(rsp->gpnum),
2183
					       TPS("fqswait"));
2184
			rsp->gp_state = RCU_GP_WAIT_FQS;
2185
			ret = swait_event_idle_timeout(rsp->gp_wq,
2186
					rcu_gp_fqs_check_wake(rsp, &gf), j);
2187
			rsp->gp_state = RCU_GP_DOING_FQS;
2188
			/* Locking provides needed memory barriers. */
2189
			/* If grace period done, leave loop. */
2190
			if (!READ_ONCE(rnp->qsmask) &&
2191
			    !rcu_preempt_blocked_readers_cgp(rnp))
2192
				break;
2193
			/* If time for quiescent-state forcing, do it. */
2194 2195
			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
			    (gf & RCU_GP_FLAG_FQS)) {
2196
				trace_rcu_grace_period(rsp->name,
2197
						       READ_ONCE(rsp->gpnum),
2198
						       TPS("fqsstart"));
2199 2200
				rcu_gp_fqs(rsp, first_gp_fqs);
				first_gp_fqs = false;
2201
				trace_rcu_grace_period(rsp->name,
2202
						       READ_ONCE(rsp->gpnum),
2203
						       TPS("fqsend"));
2204
				cond_resched_rcu_qs();
2205
				WRITE_ONCE(rsp->gp_activity, jiffies);
2206 2207 2208 2209 2210 2211 2212 2213 2214
				ret = 0; /* Force full wait till next FQS. */
				j = jiffies_till_next_fqs;
				if (j > HZ) {
					j = HZ;
					jiffies_till_next_fqs = HZ;
				} else if (j < 1) {
					j = 1;
					jiffies_till_next_fqs = 1;
				}
2215 2216
			} else {
				/* Deal with stray signal. */
2217
				cond_resched_rcu_qs();
2218
				WRITE_ONCE(rsp->gp_activity, jiffies);
2219
				WARN_ON(signal_pending(current));
2220
				trace_rcu_grace_period(rsp->name,
2221
						       READ_ONCE(rsp->gpnum),
2222
						       TPS("fqswaitsig"));
2223 2224 2225 2226 2227 2228
				ret = 1; /* Keep old FQS timing. */
				j = jiffies;
				if (time_after(jiffies, rsp->jiffies_force_qs))
					j = 1;
				else
					j = rsp->jiffies_force_qs - j;
2229
			}
2230
		}
2231 2232

		/* Handle grace-period end. */
2233
		rsp->gp_state = RCU_GP_CLEANUP;
2234
		rcu_gp_cleanup(rsp);
2235
		rsp->gp_state = RCU_GP_CLEANED;
2236 2237 2238
	}
}

2239
/*
2240 2241 2242 2243 2244 2245 2246
 * Report a full set of quiescent states to the specified rcu_state data
 * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
 * kthread if another grace period is required.  Whether we wake
 * the grace-period kthread or it awakens itself for the next round
 * of quiescent-state forcing, that kthread will clean up after the
 * just-completed grace period.  Note that the caller must hold rnp->lock,
 * which is released before return.
2247
 */
2248
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2249
	__releases(rcu_get_root(rsp)->lock)
2250
{
2251
	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
2252
	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2253
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
Boqun Feng's avatar
Boqun Feng committed
2254
	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2255
	rcu_gp_kthread_wake(rsp);
2256 2257
}

2258
/*
2259 2260 2261
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
2262 2263 2264 2265 2266
 * must be represented by the same rcu_node structure (which need not be a
 * leaf rcu_node structure, though it often will be).  The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gpnum is equal to gps.  That structure's lock
 * must be held upon entry, and it is released before return.
2267 2268
 */
static void
2269
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2270
		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2271 2272
	__releases(rnp->lock)
{
2273
	unsigned long oldmask = 0;
2274 2275
	struct rcu_node *rnp_c;

2276
	raw_lockdep_assert_held_rcu_node(rnp);
2277

2278 2279
	/* Walk up the rcu_node hierarchy. */
	for (;;) {
2280
		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2281

2282 2283 2284 2285
			/*
			 * Our bit has already been cleared, or the
			 * relevant grace period is already over, so done.
			 */
Boqun Feng's avatar
Boqun Feng committed
2286
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2287 2288
			return;
		}
2289
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2290 2291
		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
			     rcu_preempt_blocked_readers_cgp(rnp));
2292
		rnp->qsmask &= ~mask;
2293 2294 2295 2296
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
2297
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2298 2299

			/* Other bits still set at this level, so done. */
Boqun Feng's avatar
Boqun Feng committed
2300
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2301 2302 2303 2304 2305 2306 2307 2308 2309
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */

			break;
		}
Boqun Feng's avatar
Boqun Feng committed
2310
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2311
		rnp_c = rnp;
2312
		rnp = rnp->parent;
2313
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2314
		oldmask = rnp_c->qsmask;
2315 2316 2317 2318
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
2319
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2320
	 * to clean up and start the next grace period if one is needed.
2321
	 */
2322
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
2323 2324
}

2325 2326 2327 2328 2329 2330 2331
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
2332
static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2333 2334 2335
				      struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
2336
	unsigned long gps;
2337 2338 2339
	unsigned long mask;
	struct rcu_node *rnp_p;

2340
	raw_lockdep_assert_held_rcu_node(rnp);
2341 2342
	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
Boqun Feng's avatar
Boqun Feng committed
2343
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2344 2345 2346 2347 2348 2349
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
2350 2351
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
2352 2353 2354 2355 2356
		 */
		rcu_report_qs_rsp(rsp, flags);
		return;
	}

2357 2358
	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
	gps = rnp->gpnum;
2359
	mask = rnp->grpmask;
Boqun Feng's avatar
Boqun Feng committed
2360
	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2361
	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2362
	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2363 2364
}

2365
/*
2366
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2367
 * structure.  This must be called from the specified CPU.
2368 2369
 */
static void
2370
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2371 2372 2373
{
	unsigned long flags;
	unsigned long mask;
2374
	bool needwake;
2375 2376 2377
	struct rcu_node *rnp;

	rnp = rdp->mynode;
2378
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2379 2380
	if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
	    rnp->completed == rnp->gpnum || rdp->gpwrap) {
2381 2382

		/*
2383 2384 2385 2386
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
2387
		 */
2388
		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2389
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
Boqun Feng's avatar
Boqun Feng committed
2390
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2391 2392 2393 2394
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
Boqun Feng's avatar
Boqun Feng committed
2395
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2396
	} else {
2397
		rdp->core_needs_qs = false;
2398 2399 2400 2401 2402

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
2403
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2404

2405 2406
		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
		/* ^^^ Released rnp->lock */
2407 2408
		if (needwake)
			rcu_gp_kthread_wake(rsp);
2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420
	}
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
2421 2422
	/* Check for grace-period ends and beginnings. */
	note_gp_changes(rsp, rdp);
2423 2424 2425 2426 2427

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
2428
	if (!rdp->core_needs_qs)
2429 2430 2431 2432 2433 2434
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
2435
	if (rdp->cpu_no_qs.b.norm)
2436 2437
		return;

2438 2439 2440 2441
	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
2442
	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2443 2444
}

2445 2446 2447 2448 2449
/*
 * Trace the fact that this CPU is going offline.
 */
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
2450 2451 2452
	RCU_TRACE(unsigned long mask;)
	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2453

2454 2455 2456
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

2457
	RCU_TRACE(mask = rdp->grpmask;)
2458 2459
	trace_rcu_grace_period(rsp->name,
			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2460
			       TPS("cpuofl"));
2461 2462
}

2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484
/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section.  Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.  That said, invoking it after the fact will cost you
 * a needless lock acquisition.  So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

2485
	raw_lockdep_assert_held_rcu_node(rnp);
2486 2487
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2488 2489 2490 2491 2492 2493
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;
2494
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2495
		rnp->qsmaskinit &= ~mask;
2496
		rnp->qsmask &= ~mask;
2497
		if (rnp->qsmaskinit) {
Boqun Feng's avatar
Boqun Feng committed
2498 2499
			raw_spin_unlock_rcu_node(rnp);
			/* irqs remain disabled. */
2500 2501
			return;
		}
Boqun Feng's avatar
Boqun Feng committed
2502
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2503 2504 2505
	}
}

2506
/*
2507
 * The CPU has been completely removed, and some other CPU is reporting
2508 2509 2510
 * this fact from process context.  Do the remainder of the cleanup.
 * There can only be one CPU hotplug operation at a time, so no need for
 * explicit locking.
2511
 */
2512
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2513
{
2514
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2515
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2516

2517 2518 2519
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

2520
	/* Adjust any no-longer-needed kthreads. */
Thomas Gleixner's avatar
Thomas Gleixner committed
2521
	rcu_boost_kthread_setaffinity(rnp, -1);
2522 2523 2524 2525 2526 2527
}

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Thottle as specified by rdp->blimit.
 */
2528
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2529 2530
{
	unsigned long flags;
2531 2532 2533
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	long bl, count;
2534

2535
	/* If no callbacks are ready, just return. */
2536 2537 2538 2539 2540 2541
	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
		trace_rcu_batch_start(rsp->name,
				      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
		trace_rcu_batch_end(rsp->name, 0,
				    !rcu_segcblist_empty(&rdp->cblist),
2542 2543
				    need_resched(), is_idle_task(current),
				    rcu_is_callbacks_kthread());
2544
		return;
2545
	}
2546 2547 2548

	/*
	 * Extract the list of ready callbacks, disabling to prevent
2549 2550
	 * races with call_rcu() from interrupt handlers.  Leave the
	 * callback counts, as rcu_barrier() needs to be conservative.
2551 2552
	 */
	local_irq_save(flags);
2553
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2554
	bl = rdp->blimit;
2555 2556 2557
	trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2558 2559 2560
	local_irq_restore(flags);

	/* Invoke callbacks. */
2561 2562 2563 2564 2565 2566 2567 2568 2569
	rhp = rcu_cblist_dequeue(&rcl);
	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_unqueue(rhp);
		if (__rcu_reclaim(rsp->name, rhp))
			rcu_cblist_dequeued_lazy(&rcl);
		/*
		 * Stop only if limit reached and CPU has something to do.
		 * Note: The rcl structure counts down from zero.
		 */
2570
		if (-rcl.len >= bl &&
2571 2572
		    (need_resched() ||
		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2573 2574 2575 2576
			break;
	}

	local_irq_save(flags);
2577
	count = -rcl.len;
2578 2579
	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
			    is_idle_task(current), rcu_is_callbacks_kthread());
2580

2581 2582
	/* Update counts and requeue any remaining callbacks. */
	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2583
	smp_mb(); /* List handling before counting for rcu_barrier(). */
2584
	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2585 2586

	/* Reinstate batch limit if we have worked down the excess. */
2587 2588
	count = rcu_segcblist_n_cbs(&rdp->cblist);
	if (rdp->blimit == LONG_MAX && count <= qlowmark)
2589 2590
		rdp->blimit = blimit;

2591
	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2592
	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2593 2594
		rdp->qlen_last_fqs_check = 0;
		rdp->n_force_qs_snap = rsp->n_force_qs;
2595 2596
	} else if (count < rdp->qlen_last_fqs_check - qhimark)
		rdp->qlen_last_fqs_check = count;
2597 2598 2599 2600 2601

	/*
	 * The following usually indicates a double call_rcu().  To track
	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
	 */
2602
	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
2603

2604 2605
	local_irq_restore(flags);

2606
	/* Re-invoke RCU core processing if there are callbacks remaining. */
2607
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
2608
		invoke_rcu_core();
2609 2610 2611 2612 2613
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2614
 * Also schedule RCU core processing.
2615
 *
2616
 * This function must be called from hardirq context.  It is normally
2617
 * invoked from the scheduling-clock interrupt.
2618
 */
2619
void rcu_check_callbacks(int user)
2620
{
2621
	trace_rcu_utilization(TPS("Start scheduler-tick"));
2622
	increment_cpu_stall_ticks();
2623
	if (user || rcu_is_cpu_rrupt_from_idle()) {
2624 2625 2626 2627 2628

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
2629
		 * a quiescent state, so note it.
2630 2631
		 *
		 * No memory barrier is required here because both
2632 2633 2634
		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
		 * variables that other CPUs neither access nor modify,
		 * at least not while the corresponding CPU is online.
2635 2636
		 */

2637 2638
		rcu_sched_qs();
		rcu_bh_qs();
2639 2640 2641 2642 2643 2644 2645

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * a rcu_bh read-side critical section.  This is an _bh
2646
		 * critical section, so note it.
2647 2648
		 */

2649
		rcu_bh_qs();
2650
	}
2651
	rcu_preempt_check_callbacks();
2652
	if (rcu_pending())
2653
		invoke_rcu_core();
Paul E. McKenney's avatar
Paul E. McKenney committed
2654 2655
	if (user)
		rcu_note_voluntary_context_switch(current);
2656
	trace_rcu_utilization(TPS("End scheduler-tick"));
2657 2658 2659 2660 2661
}

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
2662 2663
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
2664
 * The caller must have suppressed start of new grace periods.
2665
 */
2666
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
2667 2668 2669 2670
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
2671
	struct rcu_node *rnp;
2672

2673
	rcu_for_each_leaf_node(rsp, rnp) {
2674
		cond_resched_rcu_qs();
2675
		mask = 0;
2676
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2677
		if (rnp->qsmask == 0) {
2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700
			if (rcu_state_p == &rcu_sched_state ||
			    rsp != rcu_state_p ||
			    rcu_preempt_blocked_readers_cgp(rnp)) {
				/*
				 * No point in scanning bits because they
				 * are all zero.  But we might need to
				 * priority-boost blocked readers.
				 */
				rcu_initiate_boost(rnp, flags);
				/* rcu_initiate_boost() releases rnp->lock */
				continue;
			}
			if (rnp->parent &&
			    (rnp->parent->qsmask & rnp->grpmask)) {
				/*
				 * Race between grace-period
				 * initialization and task exiting RCU
				 * read-side critical section: Report.
				 */
				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
				/* rcu_report_unblock_qs_rnp() rlses ->lock */
				continue;
			}
2701
		}
2702 2703
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2704
			if ((rnp->qsmask & bit) != 0) {
2705
				if (f(per_cpu_ptr(rsp->rda, cpu)))
2706 2707
					mask |= bit;
			}
2708
		}
2709
		if (mask != 0) {
2710 2711
			/* Idle/offline CPUs, report (releases rnp->lock. */
			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2712 2713
		} else {
			/* Nothing to do here, so just drop the lock. */
Boqun Feng's avatar
Boqun Feng committed
2714
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2715 2716 2717 2718 2719 2720 2721 2722
		}
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
2723
static void force_quiescent_state(struct rcu_state *rsp)
2724 2725
{
	unsigned long flags;
2726 2727 2728 2729 2730
	bool ret;
	struct rcu_node *rnp;
	struct rcu_node *rnp_old = NULL;

	/* Funnel through hierarchy to reduce memory contention. */
2731
	rnp = __this_cpu_read(rsp->rda->mynode);
2732
	for (; rnp != NULL; rnp = rnp->parent) {
2733
		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2734 2735 2736
		      !raw_spin_trylock(&rnp->fqslock);
		if (rnp_old != NULL)
			raw_spin_unlock(&rnp_old->fqslock);
2737
		if (ret)
2738 2739 2740 2741
			return;
		rnp_old = rnp;
	}
	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2742

2743
	/* Reached the root of the rcu_node tree, acquire lock. */
2744
	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2745
	raw_spin_unlock(&rnp_old->fqslock);
2746
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
Boqun Feng's avatar
Boqun Feng committed
2747
		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2748
		return;  /* Someone beat us to it. */
2749
	}
2750
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
Boqun Feng's avatar
Boqun Feng committed
2751
	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2752
	rcu_gp_kthread_wake(rsp);
2753 2754 2755
}

/*
2756 2757 2758
 * This does the RCU core processing work for the specified rcu_state
 * and rcu_data structures.  This may be called only from the CPU to
 * whom the rdp belongs.
2759 2760
 */
static void
2761
__rcu_process_callbacks(struct rcu_state *rsp)
2762 2763
{
	unsigned long flags;
2764
	bool needwake;
2765
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2766
	struct rcu_node *rnp;
2767

2768
	WARN_ON_ONCE(!rdp->beenonline);
2769

2770 2771 2772
	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786
	/* No grace period and unregistered callbacks? */
	if (!rcu_gp_in_progress(rsp) &&
	    rcu_segcblist_is_enabled(&rdp->cblist)) {
		local_irq_save(flags);
		if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
			local_irq_restore(flags);
		} else {
			rnp = rdp->mynode;
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (needwake)
				rcu_gp_kthread_wake(rsp);
		}
2787 2788 2789
	}

	/* If there are callbacks ready, invoke them. */
2790
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
2791
		invoke_rcu_callbacks(rsp, rdp);
2792 2793 2794

	/* Do any needed deferred wakeups of rcuo kthreads. */
	do_nocb_deferred_wakeup(rdp);
2795 2796
}

2797
/*
2798
 * Do RCU core processing for the current CPU.
2799
 */
2800
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
2801
{
2802 2803
	struct rcu_state *rsp;

2804 2805
	if (cpu_is_offline(smp_processor_id()))
		return;
2806
	trace_rcu_utilization(TPS("Start RCU core"));
2807 2808
	for_each_rcu_flavor(rsp)
		__rcu_process_callbacks(rsp);
2809
	trace_rcu_utilization(TPS("End RCU core"));
2810 2811
}

2812
/*
2813 2814 2815
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
2816
 * are running on the current CPU with softirqs disabled, the
2817
 * rcu_cpu_kthread_task cannot disappear out from under us.
2818
 */
2819
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2820
{
2821
	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
2822
		return;
2823 2824
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
2825 2826
		return;
	}
2827
	invoke_rcu_callbacks_kthread();
2828 2829
}

2830
static void invoke_rcu_core(void)
2831
{
2832 2833
	if (cpu_online(smp_processor_id()))
		raise_softirq(RCU_SOFTIRQ);
2834 2835
}

2836 2837 2838 2839 2840
/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
			    struct rcu_head *head, unsigned long flags)
2841
{
2842 2843
	bool needwake;

2844 2845 2846 2847
	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
2848
	if (!rcu_is_watching())
2849 2850
		invoke_rcu_core();

2851
	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2852
	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2853
		return;
2854

2855 2856 2857 2858 2859 2860 2861
	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
2862 2863
	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
		     rdp->qlen_last_fqs_check + qhimark)) {
2864 2865

		/* Are we ignoring a completed grace period? */
2866
		note_gp_changes(rsp, rdp);
2867 2868 2869

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress(rsp)) {
2870
			struct rcu_node *rnp = rdp->mynode;
2871

2872 2873 2874
			raw_spin_lock_rcu_node(rnp);
			needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
			raw_spin_unlock_rcu_node(rnp);
2875 2876
			if (needwake)
				rcu_gp_kthread_wake(rsp);
2877 2878 2879 2880
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2881
			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2882
				force_quiescent_state(rsp);
2883
			rdp->n_force_qs_snap = rsp->n_force_qs;
2884
			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2885
		}
2886
	}
2887 2888
}

2889 2890 2891 2892 2893 2894 2895
/*
 * RCU callback function to leak a callback.
 */
static void rcu_leak_callback(struct rcu_head *rhp)
{
}

Paul E. McKenney's avatar
Paul E. McKenney committed
2896 2897 2898 2899 2900 2901
/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */
2902
static void
2903
__call_rcu(struct rcu_head *head, rcu_callback_t func,
Paul E. McKenney's avatar
Paul E. McKenney committed
2904
	   struct rcu_state *rsp, int cpu, bool lazy)
2905 2906 2907 2908
{
	unsigned long flags;
	struct rcu_data *rdp;

2909 2910 2911
	/* Misaligned rcu_head! */
	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));

2912
	if (debug_rcu_head_queue(head)) {
2913 2914 2915 2916 2917 2918 2919
		/*
		 * Probable double call_rcu(), so leak the callback.
		 * Use rcu:rcu_callback trace event to find the previous
		 * time callback was passed to __call_rcu().
		 */
		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
			  head, head->func);
2920
		WRITE_ONCE(head->func, rcu_leak_callback);
2921 2922
		return;
	}
2923 2924 2925
	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
2926
	rdp = this_cpu_ptr(rsp->rda);
2927 2928

	/* Add the callback to our list. */
2929
	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
Paul E. McKenney's avatar
Paul E. McKenney committed
2930 2931 2932 2933
		int offline;

		if (cpu != -1)
			rdp = per_cpu_ptr(rsp->rda, cpu);
2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946
		if (likely(rdp->mynode)) {
			/* Post-boot, so this should be for a no-CBs CPU. */
			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
			WARN_ON_ONCE(offline);
			/* Offline CPU, _call_rcu() illegal, leak callback.  */
			local_irq_restore(flags);
			return;
		}
		/*
		 * Very early boot, before rcu_init().  Initialize if needed
		 * and then drop through to queue the callback.
		 */
		BUG_ON(cpu != -1);
2947
		WARN_ON_ONCE(!rcu_is_watching());
2948 2949
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
2950
	}
2951 2952
	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
	if (!lazy)
2953
		rcu_idle_count_callbacks_posted();
2954

2955 2956
	if (__is_kfree_rcu_offset((unsigned long)func))
		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2957 2958
					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
					 rcu_segcblist_n_cbs(&rdp->cblist));
2959
	else
2960 2961 2962
		trace_rcu_callback(rsp->name, head,
				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
				   rcu_segcblist_n_cbs(&rdp->cblist));
2963

2964 2965
	/* Go handle any RCU core processing required. */
	__call_rcu_core(rsp, rdp, head, flags);
2966 2967 2968
	local_irq_restore(flags);
}

2969 2970 2971 2972 2973 2974 2975 2976 2977 2978
/**
 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
2979 2980 2981 2982
 * RCU read-side critical sections are delimited by:
 *
 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 * - anything that disables preemption.
2983 2984 2985 2986 2987
 *
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
2988
 */
2989
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
2990
{
Paul E. McKenney's avatar
Paul E. McKenney committed
2991
	__call_rcu(head, func, &rcu_sched_state, -1, 0);
2992
}
2993
EXPORT_SYMBOL_GPL(call_rcu_sched);
2994

2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006
/**
 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
3007 3008 3009 3010 3011 3012
 * RCU read-side critical sections are delimited by:
 *
 * - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *
 * These may be nested.
3013 3014 3015
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
3016
 */
3017
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3018
{
Paul E. McKenney's avatar
Paul E. McKenney committed
3019
	__call_rcu(head, func, &rcu_bh_state, -1, 0);
3020 3021 3022
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

3023 3024 3025 3026 3027 3028 3029 3030
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
3031
		    rcu_callback_t func)
3032
{
3033
	__call_rcu(head, func, rcu_state_p, -1, 1);
3034 3035 3036
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047
/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point time during execution
 * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */
static inline int rcu_blocking_is_gp(void)
{
3048 3049
	int ret;

3050
	might_sleep();  /* Check for RCU read-side critical section. */
3051 3052 3053 3054
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
3055 3056
}

3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.   These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090
 * non-threaded hardware-interrupt handlers, in progress on entry will
 * have completed before this primitive returns.  However, this does not
 * guarantee that softirq handlers will have completed, since in some
 * kernels, these handlers can run in process context, and can block.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_sched() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-sched read-side critical section whose beginning
 * preceded the call to synchronize_sched().  In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_sched() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_sched() and before the beginning of
 * that RCU read-side critical section.  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_sched(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
3091 3092 3093
 */
void synchronize_sched(void)
{
3094 3095 3096 3097
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU-sched read-side critical section");
3098 3099
	if (rcu_blocking_is_gp())
		return;
3100
	if (rcu_gp_is_expedited())
3101 3102 3103
		synchronize_sched_expedited();
	else
		wait_rcu_gp(call_rcu_sched);
3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
3115 3116 3117
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
3118 3119 3120
 */
void synchronize_rcu_bh(void)
{
3121 3122 3123 3124
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3125 3126
	if (rcu_blocking_is_gp())
		return;
3127
	if (rcu_gp_is_expedited())
3128 3129 3130
		synchronize_rcu_bh_expedited();
	else
		wait_rcu_gp(call_rcu_bh);
3131 3132 3133
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);

3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153
/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
	smp_mb();  /* ^^^ */

	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_rcu()
	 * and cond_synchronize_rcu().
	 */
3154
	return smp_load_acquire(&rcu_state_p->gpnum);
3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	unsigned long newstate;

	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
3180
	newstate = smp_load_acquire(&rcu_state_p->completed);
3181 3182 3183 3184 3185
	if (ULONG_CMP_GE(oldstate, newstate))
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237
/**
 * get_state_synchronize_sched - Snapshot current RCU-sched state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_sched()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_sched(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
	smp_mb();  /* ^^^ */

	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_sched()
	 * and cond_synchronize_sched().
	 */
	return smp_load_acquire(&rcu_sched_state.gpnum);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_sched);

/**
 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_sched()
 *
 * If a full RCU-sched grace period has elapsed since the earlier call to
 * get_state_synchronize_sched(), just return.  Otherwise, invoke
 * synchronize_sched() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_sched(unsigned long oldstate)
{
	unsigned long newstate;

	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
	newstate = smp_load_acquire(&rcu_sched_state.completed);
	if (ULONG_CMP_GE(oldstate, newstate))
		synchronize_sched();
}
EXPORT_SYMBOL_GPL(cond_synchronize_sched);

3238 3239 3240 3241 3242 3243 3244 3245 3246
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
3247 3248
	struct rcu_node *rnp = rdp->mynode;

3249 3250 3251
	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

3252 3253 3254 3255
	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
	if (rcu_nohz_full_cpu(rsp))
		return 0;

3256
	/* Is the RCU core waiting for a quiescent state from this CPU? */
3257
	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
3258 3259 3260
		return 1;

	/* Does this CPU have callbacks ready to invoke? */
3261
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
3262 3263 3264
		return 1;

	/* Has RCU gone idle with this CPU needing another grace period? */
3265 3266 3267
	if (!rcu_gp_in_progress(rsp) &&
	    rcu_segcblist_is_enabled(&rdp->cblist) &&
	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3268 3269 3270
		return 1;

	/* Has another RCU grace period completed?  */
3271
	if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
3272 3273 3274
		return 1;

	/* Has a new RCU grace period started? */
3275
	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3276
	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3277 3278
		return 1;

3279
	/* Does this CPU need a deferred NOCB wakeup? */
3280
	if (rcu_nocb_need_deferred_wakeup(rdp))
3281 3282
		return 1;

3283 3284 3285 3286 3287 3288 3289 3290 3291
	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
3292
static int rcu_pending(void)
3293
{
3294 3295 3296
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
3297
		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3298 3299
			return 1;
	return 0;
3300 3301 3302
}

/*
3303 3304 3305
 * Return true if the specified CPU has any callback.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
3306
 */
3307
static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3308
{
3309 3310 3311
	bool al = true;
	bool hc = false;
	struct rcu_data *rdp;
3312 3313
	struct rcu_state *rsp;

3314
	for_each_rcu_flavor(rsp) {
3315
		rdp = this_cpu_ptr(rsp->rda);
3316
		if (rcu_segcblist_empty(&rdp->cblist))
3317 3318
			continue;
		hc = true;
3319
		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
3320
			al = false;
3321 3322
			break;
		}
3323 3324 3325 3326
	}
	if (all_lazy)
		*all_lazy = al;
	return hc;
3327 3328
}

3329 3330 3331 3332
/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
3333
static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3334 3335 3336 3337 3338 3339
			       int cpu, unsigned long done)
{
	trace_rcu_barrier(rsp->name, s, cpu,
			  atomic_read(&rsp->barrier_cpu_count), done);
}

3340 3341 3342 3343
/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */
3344
static void rcu_barrier_callback(struct rcu_head *rhp)
3345
{
3346 3347 3348
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
	struct rcu_state *rsp = rdp->rsp;

3349
	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3350 3351
		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
				   rsp->barrier_sequence);
3352
		complete(&rsp->barrier_completion);
3353
	} else {
3354
		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
3355
	}
3356 3357 3358 3359 3360 3361 3362
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
3363
	struct rcu_state *rsp = type;
3364
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3365

3366
	_rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
3367 3368 3369 3370 3371 3372
	rdp->barrier_head.func = rcu_barrier_callback;
	debug_rcu_head_queue(&rdp->barrier_head);
	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
		atomic_inc(&rsp->barrier_cpu_count);
	} else {
		debug_rcu_head_unqueue(&rdp->barrier_head);
3373 3374
		_rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
				   rsp->barrier_sequence);
3375
	}
3376 3377 3378 3379 3380 3381
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
3382
static void _rcu_barrier(struct rcu_state *rsp)
3383
{
3384 3385
	int cpu;
	struct rcu_data *rdp;
3386
	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
3387

3388
	_rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
3389

3390
	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3391
	mutex_lock(&rsp->barrier_mutex);
3392

3393 3394
	/* Did someone else do our work for us? */
	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
3395 3396
		_rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
				   rsp->barrier_sequence);
3397 3398 3399 3400 3401
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rsp->barrier_mutex);
		return;
	}

3402 3403
	/* Mark the start of the barrier operation. */
	rcu_seq_start(&rsp->barrier_sequence);
3404
	_rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
3405

3406
	/*
3407 3408
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
3409 3410
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
3411
	 */
3412
	init_completion(&rsp->barrier_completion);
3413
	atomic_set(&rsp->barrier_cpu_count, 1);
3414
	get_online_cpus();
3415 3416

	/*
3417 3418 3419
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
3420
	 */
Paul E. McKenney's avatar
Paul E. McKenney committed
3421
	for_each_possible_cpu(cpu) {
3422
		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
Paul E. McKenney's avatar
Paul E. McKenney committed
3423
			continue;
3424
		rdp = per_cpu_ptr(rsp->rda, cpu);
3425
		if (rcu_is_nocb_cpu(cpu)) {
3426
			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3427
				_rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
3428
						   rsp->barrier_sequence);
3429
			} else {
3430
				_rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
3431
						   rsp->barrier_sequence);
3432
				smp_mb__before_atomic();
3433 3434 3435 3436
				atomic_inc(&rsp->barrier_cpu_count);
				__call_rcu(&rdp->barrier_head,
					   rcu_barrier_callback, rsp, cpu, 0);
			}
3437
		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
3438
			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
3439
					   rsp->barrier_sequence);
3440
			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3441
		} else {
3442
			_rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
3443
					   rsp->barrier_sequence);
3444 3445
		}
	}
3446
	put_online_cpus();
3447 3448 3449 3450 3451

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
3452
	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3453
		complete(&rsp->barrier_completion);
3454 3455

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3456
	wait_for_completion(&rsp->barrier_completion);
3457

3458
	/* Mark the end of the barrier operation. */
3459
	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
3460 3461
	rcu_seq_end(&rsp->barrier_sequence);

3462
	/* Other rcu_barrier() invocations can now safely proceed. */
3463
	mutex_unlock(&rsp->barrier_mutex);
3464 3465 3466 3467 3468 3469 3470
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
3471
	_rcu_barrier(&rcu_bh_state);
3472 3473 3474 3475 3476 3477 3478 3479
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
3480
	_rcu_barrier(&rcu_sched_state);
3481 3482 3483
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494
/*
 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

3495
	raw_lockdep_assert_held_rcu_node(rnp);
3496 3497 3498 3499 3500
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
3501
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3502
		rnp->qsmaskinit |= mask;
Boqun Feng's avatar
Boqun Feng committed
3503
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3504 3505 3506
	}
}

3507
/*
3508
 * Do boot-time initialization of a CPU's per-CPU RCU data.
3509
 */
3510 3511
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3512
{
3513
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3514 3515

	/* Set up local state, ensuring consistent view of global state. */
3516
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3517
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3518
	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
3519
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3520
	rdp->cpu = cpu;
3521
	rdp->rsp = rsp;
Paul E. McKenney's avatar
Paul E. McKenney committed
3522
	rcu_boot_init_nocb_percpu_data(rdp);
3523 3524 3525 3526 3527 3528 3529
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
3530
 */
3531
static void
3532
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3533 3534
{
	unsigned long flags;
3535
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3536 3537 3538
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
3539
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
3540 3541
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
3542
	rdp->blimit = blimit;
3543 3544 3545
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !init_nocb_callback_list(rdp))
		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3546
	rdp->dynticks->dynticks_nesting = 1;	/* CPU not up, no tearing. */
3547
	rcu_dynticks_eqs_online();
Boqun Feng's avatar
Boqun Feng committed
3548
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
3549

3550 3551 3552 3553 3554
	/*
	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
3555
	rnp = rdp->mynode;
3556
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
3557
	rdp->beenonline = true;	 /* We have now been online. */
3558 3559
	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
	rdp->completed = rnp->completed;
3560
	rdp->cpu_no_qs.b.norm = true;
3561
	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3562
	rdp->core_needs_qs = false;
3563 3564
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw_gpnum = rnp->gpnum - 1;
3565
	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
Boqun Feng's avatar
Boqun Feng committed
3566
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3567 3568
}

3569 3570 3571 3572
/*
 * Invoked early in the CPU-online process, when pretty much all
 * services are available.  The incoming CPU is not present.
 */
3573
int rcutree_prepare_cpu(unsigned int cpu)
3574
{
3575 3576 3577
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
3578
		rcu_init_percpu_data(cpu, rsp);
3579 3580 3581 3582 3583 3584 3585

	rcu_prepare_kthreads(cpu);
	rcu_spawn_all_nocb_kthreads(cpu);

	return 0;
}

3586 3587 3588
/*
 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
 */
3589 3590 3591 3592 3593 3594 3595
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

3596 3597 3598 3599
/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
3600 3601
int rcutree_online_cpu(unsigned int cpu)
{
3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->ffmask |= rdp->grpmask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
3614 3615
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_online_cpu(cpu);
3616 3617 3618 3619
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);
3620 3621 3622
	return 0;
}

3623 3624 3625 3626
/*
 * Near the beginning of the process.  The CPU is still very much alive
 * with pretty much all services enabled.
 */
3627 3628
int rcutree_offline_cpu(unsigned int cpu)
{
3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->ffmask &= ~rdp->grpmask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

3642
	rcutree_affinity_setting(cpu, cpu);
3643 3644
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_offline_cpu(cpu);
3645 3646 3647
	return 0;
}

3648 3649 3650
/*
 * Near the end of the offline process.  We do only tracing here.
 */
3651 3652 3653 3654 3655 3656 3657 3658 3659
int rcutree_dying_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_cpu(rsp);
	return 0;
}

3660 3661 3662
/*
 * The outgoing CPU is gone and we are running elsewhere.
 */
3663 3664 3665 3666 3667 3668 3669 3670 3671
int rcutree_dead_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rcu_cleanup_dead_cpu(cpu, rsp);
		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
	}
	return 0;
3672 3673
}

3674 3675 3676 3677 3678 3679
/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
3680 3681 3682 3683
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
3684 3685 3686 3687 3688
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
3689 3690
	int nbits;
	unsigned long oldmask;
3691 3692 3693 3694 3695
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
3696
		rdp = per_cpu_ptr(rsp->rda, cpu);
3697 3698 3699 3700
		rnp = rdp->mynode;
		mask = rdp->grpmask;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->qsmaskinitnext |= mask;
3701
		oldmask = rnp->expmaskinitnext;
3702
		rnp->expmaskinitnext |= mask;
3703 3704 3705 3706
		oldmask ^= rnp->expmaskinitnext;
		nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
		/* Allow lockless access for expedited grace periods. */
		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
3707 3708
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
3709
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3710 3711
}

3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728
#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function.  We now remove it from the rcu_node tree's ->qsmaskinit
 * bit masks.
 */
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rnp->qsmaskinitnext &= ~mask;
3729
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3730 3731
}

3732 3733 3734 3735 3736 3737 3738 3739
/*
 * The outgoing function has no further need of RCU, so remove it from
 * the list of CPUs that RCU must track.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751
void rcu_report_dead(unsigned int cpu)
{
	struct rcu_state *rsp;

	/* QS for any half-done expedited RCU-sched GP. */
	preempt_disable();
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(rcu_sched_state.rda), true);
	preempt_enable();
	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_idle_cpu(cpu, rsp);
}
3752

3753
/* Migrate the dead CPU's callbacks to the current CPU. */
3754 3755 3756
static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
3757
	struct rcu_data *my_rdp;
3758
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3759
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
3760
	bool needwake;
3761

3762 3763 3764
	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */

3765 3766 3767 3768 3769 3770
	local_irq_save(flags);
	my_rdp = this_cpu_ptr(rsp->rda);
	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
		local_irq_restore(flags);
		return;
	}
3771
	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
3772 3773 3774
	/* Leverage recent GPs and set GP for new callbacks. */
	needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
		   rcu_advance_cbs(rsp, rnp_root, my_rdp);
3775
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3776 3777
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
3778
	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
3779 3780
	if (needwake)
		rcu_gp_kthread_wake(rsp);
3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}

/*
 * The outgoing CPU has just passed through the dying-idle state,
 * and we are being invoked from the CPU that was IPIed to continue the
 * offline operation.  We need to migrate the outgoing CPU's callbacks.
 */
void rcutree_migrate_callbacks(int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_migrate_callbacks(cpu, rsp);
}
3800 3801
#endif

3802 3803 3804 3805
/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
3806 3807 3808 3809 3810 3811 3812
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3813
			rcu_expedite_gp();
3814 3815 3816
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
3817 3818
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_unexpedite_gp();
3819 3820 3821 3822 3823 3824 3825
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

3826
/*
3827
 * Spawn the kthreads that handle each RCU flavor's grace periods.
3828 3829 3830 3831
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
3832
	int kthread_prio_in = kthread_prio;
3833 3834
	struct rcu_node *rnp;
	struct rcu_state *rsp;
3835
	struct sched_param sp;
3836 3837
	struct task_struct *t;

3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848
	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;
	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

3849
	rcu_scheduler_fully_active = 1;
3850
	for_each_rcu_flavor(rsp) {
3851
		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3852 3853
		BUG_ON(IS_ERR(t));
		rnp = rcu_get_root(rsp);
3854
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
3855
		rsp->gp_kthread = t;
3856 3857 3858 3859
		if (kthread_prio) {
			sp.sched_priority = kthread_prio;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		}
Boqun Feng's avatar
Boqun Feng committed
3860
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3861
		wake_up_process(t);
3862
	}
3863
	rcu_spawn_nocb_kthreads();
3864
	rcu_spawn_boost_kthreads();
3865 3866 3867 3868
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

3869
/*
3870 3871 3872 3873 3874 3875
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
3876
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
3877
 * runtime RCU functionality.
3878 3879 3880 3881 3882
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
3883 3884 3885
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
3886 3887
}

3888 3889 3890
/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
3891
static void __init rcu_init_one(struct rcu_state *rsp)
3892
{
3893 3894
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
3895 3896
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
3897 3898

	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
3899 3900 3901 3902 3903
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

3904
	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3905

3906 3907 3908
	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");
3909

3910 3911
	/* Initialize the level-tracking arrays. */

3912
	for (i = 1; i < rcu_num_lvls; i++)
3913 3914
		rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);
3915 3916 3917

	/* Initialize the elements themselves, starting from the leaves. */

3918
	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3919
		cpustride *= levelspread[i];
3920
		rnp = rsp->level[i];
3921
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
Boqun Feng's avatar
Boqun Feng committed
3922 3923
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
3924
						   &rcu_node_class[i], buf[i]);
3925 3926 3927
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
3928 3929
			rnp->gpnum = rsp->gpnum;
			rnp->completed = rsp->completed;
3930 3931 3932 3933
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
3934 3935
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
3936 3937 3938 3939 3940
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
3941
				rnp->grpnum = j % levelspread[i - 1];
3942 3943
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
3944
					      j / levelspread[i - 1];
3945 3946
			}
			rnp->level = i;
3947
			INIT_LIST_HEAD(&rnp->blkd_tasks);
3948
			rcu_init_one_nocb(rnp);
3949 3950
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
3951 3952
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
3953
			spin_lock_init(&rnp->exp_lock);
3954 3955
		}
	}
3956

3957 3958
	init_swait_queue_head(&rsp->gp_wq);
	init_swait_queue_head(&rsp->expedited_wq);
3959
	rnp = rsp->level[rcu_num_lvls - 1];
3960
	for_each_possible_cpu(i) {
3961
		while (i > rnp->grphi)
3962
			rnp++;
3963
		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3964 3965
		rcu_boot_init_percpu_data(i, rsp);
	}
3966
	list_add(&rsp->flavors, &rcu_struct_flavors);
3967 3968
}

3969 3970
/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3971
 * replace the definitions in tree.h because those are needed to size
3972 3973 3974 3975
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
3976
	ulong d;
3977
	int i;
3978
	int rcu_capacity[RCU_NUM_LVLS];
3979

3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992
	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;

3993
	/* If the compile-time values are accurate, just leave. */
3994
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
3995
	    nr_cpu_ids == NR_CPUS)
3996
		return;
3997
	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
3998
		rcu_fanout_leaf, nr_cpu_ids);
3999 4000

	/*
4001 4002 4003 4004
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
4005
	 */
4006
	if (rcu_fanout_leaf < 2 ||
4007
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4008
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4009 4010 4011 4012 4013 4014
		WARN_ON(1);
		return;
	}

	/*
	 * Compute number of nodes that can be handled an rcu_node tree
4015
	 * with the given number of levels.
4016
	 */
4017
	rcu_capacity[0] = rcu_fanout_leaf;
4018
	for (i = 1; i < RCU_NUM_LVLS; i++)
4019
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4020 4021

	/*
4022
	 * The tree must be able to accommodate the configured number of CPUs.
4023
	 * If this limit is exceeded, fall back to the compile-time values.
4024
	 */
4025 4026 4027 4028 4029
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}
4030

4031
	/* Calculate the number of levels in the tree. */
4032
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4033
	}
4034
	rcu_num_lvls = i + 1;
4035

4036
	/* Calculate the number of rcu_nodes at each level of the tree. */
4037
	for (i = 0; i < rcu_num_lvls; i++) {
4038
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4039 4040
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}
4041 4042 4043

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
4044
	for (i = 0; i < rcu_num_lvls; i++)
4045 4046 4047
		rcu_num_nodes += num_rcu_lvl[i];
}

4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069
/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */
static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

4070 4071
struct workqueue_struct *rcu_gp_wq;

4072
void __init rcu_init(void)
4073
{
Paul E. McKenney's avatar
Paul E. McKenney committed
4074
	int cpu;
4075

4076 4077
	rcu_early_boot_tests();

4078
	rcu_bootup_announce();
4079
	rcu_init_geometry();
4080 4081
	rcu_init_one(&rcu_bh_state);
	rcu_init_one(&rcu_sched_state);
4082 4083
	if (dump_tree)
		rcu_dump_rcu_node_tree(&rcu_sched_state);
4084
	__rcu_init_preempt();
Jiang Fang's avatar
Jiang Fang committed
4085
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4086 4087 4088 4089 4090 4091

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
4092
	pm_notifier(rcu_pm_notify, 0);
4093
	for_each_online_cpu(cpu) {
4094
		rcutree_prepare_cpu(cpu);
4095
		rcu_cpu_starting(cpu);
4096
		rcutree_online_cpu(cpu);
4097
	}
4098 4099 4100 4101

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
4102 4103
}

4104
#include "tree_exp.h"
4105
#include "tree_plugin.h"