/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif
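
/*
 * Worked example (illustrative): with the definitions above, bit 0 of
 * ->dynticks carries the special-action request and the remaining bits
 * form the extended-quiescent-state counter, which therefore advances in
 * steps of RCU_DYNTICK_CTRL_CTR (0x2).  A ->dynticks value of 0x6 thus
 * means "counter == 3, no special action pending", while 0x7 additionally
 * requests a TLB flush on the next idle exit (see rcu_eqs_special_set()
 * and rcu_eqs_special_exit()).
 */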

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
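/*
 * Worked example (with made-up delay values): if gp_init_delay were 4
 * jiffies, a delay would be taken roughly once every PER_RCU_NODE_PERIOD *
 * 4 = 12 grace periods, an average cost of 1/3 jiffy per grace period;
 * doubling the delay to 8 jiffies spreads it over 24 grace periods, so
 * the average slowdown stays the same.
 */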

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	/* Better be in an extended quiescent state! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
		/* Prefer duplicate flushes to losing a flush. */
		rcu_eqs_special_exit();
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}
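
/*
 * Illustrative use of the helpers above (a sketch of the pattern used by
 * the force-quiescent-state code below): first record a snapshot with
 * rdp->dynticks_snap = rcu_dynticks_snap(rdp), then on a later scan ask
 * rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap).  Because ->dynticks
 * changes only on entry to or exit from an extended quiescent state, any
 * difference between the two values proves that the CPU passed through an
 * EQS in the interval and so cannot be running a pre-existing RCU
 * read-side critical section.
 */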

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	do {
		old = atomic_read(&rdp->dynticks);
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
	} while (atomic_cmpxchg(&rdp->dynticks, old, new) != old);
	return true;
}
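
/*
 * Hypothetical usage sketch (not taken from a current caller): code that
 * wants a remote CPU to flush its TLB could first try
 * rcu_eqs_special_set(cpu).  A true return means the CPU is idle and will
 * invoke rcu_eqs_special_exit() on its next extended-quiescent-state
 * exit; a false return means the CPU is not idle, so the caller must fall
 * back to something like an IPI.
 */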

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
static void __maybe_unused rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
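
/*
 * For example (illustrative): adding 2 * RCU_DYNTICK_CTRL_CTR advances
 * ->dynticks by two steps, exactly as if this CPU had entered and then
 * immediately exited an extended quiescent state, so any other CPU
 * comparing snapshots taken before and after sees a quiescent state even
 * though this CPU never went idle.
 */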

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 &&
	       __this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1;
}
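
/*
 * Illustrative reading of the two tests above: ->dynticks_nesting <= 0
 * means no process-level non-idle section is active (the CPU is in the
 * idle loop or in nohz_full userspace), and ->dynticks_nmi_nesting <= 1
 * means at most one irq/NMI level is active, that is, any interrupt in
 * progress arrived directly from idle rather than nesting in another
 * handler.
 */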

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
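
/*
 * Worked example (with made-up parameter values): if jiffies_till_sched_qs
 * is left at its ULONG_MAX default while jiffies_till_first_fqs is 300 and
 * jiffies_till_next_fqs is 100, then j = 300 + 2 * 100 = 500; on a small
 * HZ=1000 system the HZ/10 floor of roughly 100 jiffies does not apply,
 * so jiffies_to_sched_qs becomes 500.
 */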

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(void);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	j = jiffies;
	ja = j - READ_ONCE(rcu_state.gp_activity);
	jr = j - READ_ONCE(rcu_state.gp_req_activity);
	jw = j - READ_ONCE(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state,
		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
		(long)READ_ONCE(rcu_state.gp_seq),
		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
		READ_ONCE(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
			(long)rnp->gp_seq_needed);
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (rdp->gpwrap ||
			    ULONG_CMP_GE(rcu_state.gp_seq,
					 rdp->gp_seq_needed))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)rdp->gp_seq_needed);
		}
	}
	/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	do_nocb_deferred_wakeup(rdp);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	rcu_dynticks_eqs_enter();
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/*
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
static __always_inline void rcu_nmi_exit_common(bool irq)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (irq)
		rcu_prepare_for_idle();

	rcu_dynticks_eqs_enter();

	if (irq)
		rcu_dynticks_task_enter();
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_exit(void)
{
	rcu_nmi_exit_common(false);
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit_common(true);
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter_common - inform RCU of entry to NMI context
 * @irq: Is this call from rcu_irq_enter?
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
static __always_inline void rcu_nmi_enter_common(bool irq)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	long incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (irq)
			rcu_dynticks_task_exit();

		rcu_dynticks_eqs_exit();

		if (irq)
			rcu_cleanup_after_idle();

		incby = 1;
	}
	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}
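
/*
 * Illustrative nesting walk-through: an NMI arriving on an RCU-idle CPU
 * takes ->dynticks_nmi_nesting from 0 to 1 (incby == 1 after the EQS
 * exit); a second NMI nested within the first takes it from 1 to 3; the
 * matching calls to rcu_nmi_exit_common() step it back from 3 to 1 and
 * then to 0, the final exit being the one that re-enters the extended
 * quiescent state.
 */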

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 */
void rcu_nmi_enter(void)
{
	rcu_nmi_enter_common(false);
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter_common(true);
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
		ret = true;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Handler for the irq_work request posted when a grace period has
 * gone on for too long, but not yet long enough for an RCU CPU
 * stall warning.  Set state appropriately, but just complain if
 * there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/* If waiting too long on an offline CPU, complain. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
	    time_after(jiffies, rcu_state.gp_start + HZ)) {
		bool onl;
		struct rcu_node *rnp1;

		WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched))) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
		   time_after(jiffies,
			      READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}
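
/*
 * Illustrative timeline (with a made-up jiffies_to_sched_qs of 100): a
 * grace period held up only by a CPU looping in the kernel would see that
 * CPU's .rcu_urgent_qs set at roughly 100 jiffies, .rcu_need_heavy_qs
 * added at roughly 200 jiffies (or at ->jiffies_resched, whichever comes
 * first), and, once past ->jiffies_resched, periodic resched_cpu() calls
 * plus a one-shot irq_work to nudge the holdout into reporting a
 * quiescent state.
 */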

static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rcu_state.gp_start = j;
	j1 = rcu_jiffies_till_stall_check();
	/* Record ->gp_start before ->jiffies_stall. */
	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	j = jiffies - READ_ONCE(rcu_state.gp_activity);
	if (j > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       READ_ONCE(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

static void print_other_cpu_stall(unsigned long gp_seq)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_print_detail_task_stall();
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       READ_ONCE(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
	print_cpu_stall_info_begin();
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - rcu_state.gp_start,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall();

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2);
	}
}
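
/*
 * Illustrative false-positive scenario (restating the ordering argument
 * above): if one grace period ends and another begins between the fetch
 * of ->jiffies_stall and the fetch of ->gp_start, the two fetches of
 * ->gp_seq that bracket them must differ, so gs1 != gs2 and the function
 * returns without reporting a stall.
 */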

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
				      rnp->level, rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		rnp->gp_seq_needed = gp_seq_req;
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	rcu_state.gp_req_activity = jiffies;
	if (!rcu_state.gp_kthread) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		rnp_start->gp_seq_needed = rnp->gp_seq_needed;
		rdp->gp_seq_needed = rnp->gp_seq_needed;
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

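/*
 * Locking sketch for the funnel traversal above (restating what the code
 * does, for clarity): the caller's leaf ->lock is held throughout, and
 * each non-leaf rcu_node ->lock is acquired and then released on the way
 * toward the root, so at most two rcu_node locks are held at any given
 * time.  Whatever non-leaf lock is still held when control reaches the
 * unlock_out label is dropped there.
 */
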
/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in
 * an interrupt or softirq handler), and don't bother awakening when there
 * is nothing for the grace-period kthread to do (as in several CPUs raced
 * to awaken, and we lost), and finally don't try to awaken a kthread that
 * has not yet been created.  If all those checks are passed, track some
 * debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	if ((current == rcu_state.gp_kthread &&
	     !in_interrupt() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) ||
	    !rcu_state.gp_kthread)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	lockdep_assert_irqs_disabled();
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret;
	bool need_gp;

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_gp = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_gp;
		rdp->core_needs_qs = need_gp;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		rdp->gp_seq_needed = rnp->gp_seq_needed;
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (needwake)
		rcu_gp_kthread_wake();
}

static void rcu_gp_slow(int delay)
{
	if (delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) %
	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_uninterruptible(delay);
}

/*
 * Initialize a new grace period.  Return false if no grace period required.
 */
static bool rcu_gp_init(void)
{
	unsigned long flags;
	unsigned long oldmask;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	if (!READ_ONCE(rcu_state.gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep.  */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}
	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */

	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time();
	/* Record GP times before starting GP, hence rcu_seq_start(). */
	rcu_seq_start(&rcu_state.gp_seq);
	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree.  Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
	rcu_state.gp_state = RCU_GP_ONOFF;
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock(&rcu_state.ofl_lock);
		raw_spin_lock_irq_rcu_node(rnp);
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
			raw_spin_unlock_irq_rcu_node(rnp);
			raw_spin_unlock(&rcu_state.ofl_lock);
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) { /* First online CPU for rcu_node. */
				if (!rnp->wait_blkd_tasks) /* Ever offline? */
					rcu_init_new_rnp(rnp);
			} else if (rcu_preempt_has_tasks(rnp)) {
				rnp->wait_blkd_tasks = true; /* blocked tasks */
			} else { /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
			}
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks.
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			if (!rnp->qsmaskinit)
				rcu_cleanup_dead_rnp(rnp);
		}

		raw_spin_unlock_irq_rcu_node(rnp);
		raw_spin_unlock(&rcu_state.ofl_lock);
	}
	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure, relying on the
	 * layout of the tree within the rcu_state.node[] array.  Note that
	 * other CPUs will access only the leaves of the hierarchy, thus
	 * seeing that no grace period is in progress, at least until the
	 * corresponding leaf node has been initialized.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_state.gp_state = RCU_GP_INIT;
	rcu_for_each_node_breadth_first(rnp) {
		rcu_gp_slow(gp_init_delay);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rdp = this_cpu_ptr(&rcu_data);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
		if (rnp == rdp->mynode)
			(void)__note_gp_changes(rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		/* Quiescent states for tasks on any now-offline CPUs. */
		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
		rnp->rcu_gp_init_mask = mask;
		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		else
			raw_spin_unlock_irq_rcu_node(rnp);
		cond_resched_tasks_rcu_qs();
		WRITE_ONCE(rcu_state.gp_activity, jiffies);
	}

	return true;
}

/*
 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
 * time.
 */
static bool rcu_gp_fqs_check_wake(int *gfp)
{
	struct rcu_node *rnp = rcu_get_root();

	/* Someone like call_rcu() requested a force-quiescent-state scan. */
	*gfp = READ_ONCE(rcu_state.gp_flags);
	if (*gfp & RCU_GP_FLAG_FQS)
		return true;

	/* The current grace period has completed. */
	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
		return true;

	return false;
}

/*
 * Do one round of quiescent-state forcing.
 */
static void rcu_gp_fqs(bool first_time)
{
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	rcu_state.n_force_qs++;
	if (first_time) {
		/* Collect dyntick-idle snapshots. */
		force_qs_rnp(dyntick_save_progress_counter);
	} else {
		/* Handle dyntick-idle and offline CPUs. */
		force_qs_rnp(rcu_implicit_dynticks_qs);
	}
	/* Clear flag to prevent immediate re-entry. */
	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_lock_irq_rcu_node(rnp);
		WRITE_ONCE(rcu_state.gp_flags,
			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
		raw_spin_unlock_irq_rcu_node(rnp);
	}
}

/*
 * Loop doing repeated quiescent-state forcing until the grace period ends.
 */
static void rcu_gp_fqs_loop(void)
{
	bool first_gp_fqs;
	int gf;
	unsigned long j;
	int ret;
	struct rcu_node *rnp = rcu_get_root();

	first_gp_fqs = true;
	j = READ_ONCE(jiffies_till_first_fqs);
	ret = 0;
	for (;;) {
		if (!ret) {
			rcu_state.jiffies_force_qs = jiffies + j;
			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
				   jiffies + (j ? 3 * j : 2));
		}
		trace_rcu_grace_period(rcu_state.name,
				       READ_ONCE(rcu_state.gp_seq),
				       TPS("fqswait"));
		rcu_state.gp_state = RCU_GP_WAIT_FQS;
		ret = swait_event_idle_timeout_exclusive(
				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
		rcu_state.gp_state = RCU_GP_DOING_FQS;
		/* Locking provides needed memory barriers. */
		/* If grace period done, leave loop. */
		if (!READ_ONCE(rnp->qsmask) &&
		    !rcu_preempt_blocked_readers_cgp(rnp))
			break;
		/* If time for quiescent-state forcing, do it. */
		if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
		    (gf & RCU_GP_FLAG_FQS)) {
			trace_rcu_grace_period(rcu_state.name,
					       READ_ONCE(rcu_state.gp_seq),
					       TPS("fqsstart"));
			rcu_gp_fqs(first_gp_fqs);
			first_gp_fqs = false;
			trace_rcu_grace_period(rcu_state.name,
					       READ_ONCE(rcu_state.gp_seq),
					       TPS("fqsend"));
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			ret = 0; /* Force full wait till next FQS. */
			j = READ_ONCE(jiffies_till_next_fqs);
		} else {
			/* Deal with stray signal. */
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rcu_state.name,
					       READ_ONCE(rcu_state.gp_seq),
					       TPS("fqswaitsig"));
			ret = 1; /* Keep old FQS timing. */
			j = jiffies;
			if (time_after(jiffies, rcu_state.jiffies_force_qs))
				j = 1;
			else
				j = rcu_state.jiffies_force_qs - j;
		}
	}
}

/*
 * Clean up after the old grace period.
 */
static void rcu_gp_cleanup(void)
{
	unsigned long gp_duration;
	bool needgp = false;
	unsigned long new_gp_seq;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root();
	struct swait_queue_head *sq;

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	rcu_state.gp_end = jiffies;
	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
	if (gp_duration > rcu_state.gp_max)
		rcu_state.gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Propagate new ->gp_seq value to rcu_node structures so that
	 * other CPUs don't have to wait until the start of the next grace
	 * period to process their callbacks.  This also avoids some nasty
	 * RCU grace-period initialization races by forcing the end of
	 * the current grace period to be completely recorded in all of
	 * the rcu_node structures before the beginning of the next grace
	 * period is recorded in any of the rcu_node structures.
	 */
	new_gp_seq = rcu_state.gp_seq;
	rcu_seq_end(&new_gp_seq);
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irq_rcu_node(rnp);
		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
			dump_blkd_tasks(rnp, 10);
		WARN_ON_ONCE(rnp->qsmask);
		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
		rdp = this_cpu_ptr(&rcu_data);
		if (rnp == rdp->mynode)
			needgp = __note_gp_changes(rnp, rdp) || needgp;
		/* smp_mb() provided by prior unlock-lock pair. */
		needgp = rcu_future_gp_cleanup(rnp) || needgp;
		sq = rcu_nocb_gp_get(rnp);
		raw_spin_unlock_irq_rcu_node(rnp);
		rcu_nocb_gp_cleanup(sq);
		cond_resched_tasks_rcu_qs();
		WRITE_ONCE(rcu_state.gp_activity, jiffies);
		rcu_gp_slow(gp_cleanup_delay);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */

	/* Declare grace period done, trace first to use old GP number. */
	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
	rcu_seq_end(&rcu_state.gp_seq);
	rcu_state.gp_state = RCU_GP_IDLE;
	/* Check for GP requests since above loop. */
	rdp = this_cpu_ptr(&rcu_data);
	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
				  TPS("CleanupMore"));
		needgp = true;
	}
	/* Advance CBs to reduce false positives below. */
	if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
		rcu_state.gp_req_activity = jiffies;
		trace_rcu_grace_period(rcu_state.name,
				       READ_ONCE(rcu_state.gp_seq),
				       TPS("newreq"));
	} else {
		WRITE_ONCE(rcu_state.gp_flags,
			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
	}
	raw_spin_unlock_irq_rcu_node(rnp);
}

/*
 * Body of kthread that handles grace periods.
 */
static int __noreturn rcu_gp_kthread(void *unused)
{
	rcu_bind_gp_kthread();
	for (;;) {

		/* Handle grace-period start. */
		for (;;) {
			trace_rcu_grace_period(rcu_state.name,
					       READ_ONCE(rcu_state.gp_seq),
					       TPS("reqwait"));
			rcu_state.gp_state = RCU_GP_WAIT_GPS;
			swait_event_idle_exclusive(rcu_state.gp_wq,
					 READ_ONCE(rcu_state.gp_flags) &
					 RCU_GP_FLAG_INIT);
			rcu_state.gp_state = RCU_GP_DONE_GPS;
			/* Locking provides needed memory barrier. */
			if (rcu_gp_init())
				break;
			cond_resched_tasks_rcu_qs();
			WRITE_ONCE(rcu_state.gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rcu_state.name,
					       READ_ONCE(rcu_state.gp_seq),
					       TPS("reqwaitsig"));
		}

		/* Handle quiescent-state forcing. */
		rcu_gp_fqs_loop();

		/* Handle grace-period end. */
		rcu_state.gp_state = RCU_GP_CLEANUP;
		rcu_gp_cleanup();
		rcu_state.gp_state = RCU_GP_CLEANED;
	}
}

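/*
 * For reference, the grace-period kthread above steps rcu_state.gp_state
 * through the sequence set in this file: RCU_GP_WAIT_GPS while waiting
 * for a request, RCU_GP_DONE_GPS once awakened, RCU_GP_ONOFF and
 * RCU_GP_INIT within rcu_gp_init(), RCU_GP_WAIT_FQS alternating with
 * RCU_GP_DOING_FQS within rcu_gp_fqs_loop(), then RCU_GP_CLEANUP around
 * rcu_gp_cleanup() (which itself sets RCU_GP_IDLE), and finally
 * RCU_GP_CLEANED before the outer loop starts over.
 */
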
/*
 * Report a full set of quiescent states to the rcu_state data structure.
 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
 * another grace period is required.  Whether we wake the grace-period
 * kthread or it awakens itself for the next round of quiescent-state
 * forcing, that kthread will clean up after the just-completed grace
 * period.  Note that the caller must hold rnp->lock, which is released
 * before return.
 */
static void rcu_report_qs_rsp(unsigned long flags)
	__releases(rcu_get_root()->lock)
{
	raw_lockdep_assert_held_rcu_node(rcu_get_root());
	WARN_ON_ONCE(!rcu_gp_in_progress());
	WRITE_ONCE(rcu_state.gp_flags,
		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
	rcu_gp_kthread_wake();
}

/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be a
 * leaf rcu_node structure, though it often will be).  The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
 * must be held upon entry, and it is released before return.
 *
 * As a special case, if mask is zero, the bit-already-cleared check is
 * disabled.  This allows propagating quiescent state due to resumed tasks
 * during grace-period initialization.
 */
static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long oldmask = 0;
	struct rcu_node *rnp_c;

	raw_lockdep_assert_held_rcu_node(rnp);

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {

			/*
			 * Our bit has already been cleared, or the
			 * relevant grace period is already over, so done.
			 */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
			     rcu_preempt_blocked_readers_cgp(rnp));
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		rnp->completedqs = rnp->gp_seq;
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */

			break;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		oldmask = rnp_c->qsmask;
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the corresponding rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void __maybe_unused
rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long gps;
	unsigned long mask;
	struct rcu_node *rnp_p;

	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
	    rnp->qsmask != 0) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp->completedqs = rnp->gp_seq;
	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
		 */
		rcu_report_qs_rsp(flags);
		return;
	}

	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
	gps = rnp->gp_seq;
	mask = rnp->grpmask;
	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
}

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be called from the specified CPU.
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
{
	unsigned long flags;
	unsigned long mask;
	bool needwake;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
	    rdp->gpwrap) {

		/*
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
		 */
		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	} else {
		rdp->core_needs_qs = false;

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		needwake = rcu_accelerate_cbs(rnp, rdp);

		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		/* ^^^ Released rnp->lock */
		if (needwake)
			rcu_gp_kthread_wake();
	}
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_data *rdp)
{
	/* Check for grace-period ends and beginnings. */
	note_gp_changes(rdp);

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
	if (!rdp->core_needs_qs)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (rdp->cpu_no_qs.b.norm)
		return;

	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
	rcu_report_qs_rdp(rdp->cpu, rdp);
}

/*
 * Near the end of the offline process.  Trace the fact that this CPU
 * is going offline.
 */
int rcutree_dying_cpu(unsigned int cpu)
{
	RCU_TRACE(bool blkd;)
	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
	trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
	return 0;
}

/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section.  Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.  That said, invoking it after the fact will cost you
 * a needless lock acquisition.  So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		/* Between grace periods, so better already be zero! */
		WARN_ON_ONCE(rnp->qsmask);
		if (rnp->qsmaskinit) {
			raw_spin_unlock_rcu_node(rnp);
			/* irqs remain disabled. */
			return;
		}
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}
}

/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup.
 * There can only be one CPU hotplug operation at a time, so no need for
 * explicit locking.
 */
int rcutree_dead_cpu(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	/* Adjust any no-longer-needed kthreads. */
	rcu_boost_kthread_setaffinity(rnp, -1);
	/* Do any needed no-CB deferred wakeups from this CPU. */
	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
	return 0;
}

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	long bl, count;

	/* If no callbacks are ready, just return. */
	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
		trace_rcu_batch_start(rcu_state.name,
				      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
		trace_rcu_batch_end(rcu_state.name, 0,
				    !rcu_segcblist_empty(&rdp->cblist),
				    need_resched(), is_idle_task(current),
				    rcu_is_callbacks_kthread());
		return;
	}

	/*
	 * Extract the list of ready callbacks, disabling to prevent
	 * races with call_rcu() from interrupt handlers.  Leave the
	 * callback counts, as rcu_barrier() needs to be conservative.
	 */
	local_irq_save(flags);
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
	bl = rdp->blimit;
	trace_rcu_batch_start(rcu_state.name,
			      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
	local_irq_restore(flags);

	/* Invoke callbacks. */
	rhp = rcu_cblist_dequeue(&rcl);
	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_unqueue(rhp);
		if (__rcu_reclaim(rcu_state.name, rhp))
			rcu_cblist_dequeued_lazy(&rcl);
		/*
		 * Stop only if limit reached and CPU has something to do.
		 * Note: The rcl structure counts down from zero.
		 */
		if (-rcl.len >= bl &&
		    (need_resched() ||
		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
			break;
	}

	local_irq_save(flags);
	count = -rcl.len;
	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
			    is_idle_task(current), rcu_is_callbacks_kthread());

	/* Update counts and requeue any remaining callbacks. */
	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
	smp_mb(); /* List handling before counting for rcu_barrier(). */
	rcu_segcblist_insert_count(&rdp->cblist, &rcl);

	/* Reinstate batch limit if we have worked down the excess. */
	count = rcu_segcblist_n_cbs(&rdp->cblist);
	if (rdp->blimit == LONG_MAX && count <= qlowmark)
		rdp->blimit = blimit;

	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
		rdp->qlen_last_fqs_check = 0;
		rdp->n_force_qs_snap = rcu_state.n_force_qs;
	} else if (count < rdp->qlen_last_fqs_check - qhimark)
		rdp->qlen_last_fqs_check = count;

	/*
	 * The following usually indicates a double call_rcu().  To track
	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
	 */
	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));

	local_irq_restore(flags);

	/* Re-invoke RCU core processing if there are callbacks remaining. */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		invoke_rcu_core();
}

/*
 * This function is invoked from each scheduling-clock interrupt,
 * and checks to see if this CPU is in a non-context-switch quiescent
 * state, for example, user mode or idle loop.  It also schedules RCU
 * core processing.  If the current grace period has gone on too long,
 * it will ask the scheduler to manufacture a context switch for the sole
 * purpose of providing the needed quiescent state.
 */
void rcu_sched_clock_irq(int user)
{
	trace_rcu_utilization(TPS("Start scheduler-tick"));
	raw_cpu_inc(rcu_data.ticks_this_gp);
	/* The load-acquire pairs with the store-release setting to true. */
	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		/* Idle and userspace execution already are quiescent states. */
		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
			set_tsk_need_resched(current);
			set_preempt_need_resched();
		}
		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
	}
	rcu_flavor_sched_clock_irq(user);
	if (rcu_pending())
		invoke_rcu_core();

	trace_rcu_utilization(TPS("End scheduler-tick"));
}

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		cond_resched_tasks_rcu_qs();
		mask = 0;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask == 0) {
			if (!IS_ENABLED(CONFIG_PREEMPT) ||
			    rcu_preempt_blocked_readers_cgp(rnp)) {
				/*
				 * No point in scanning bits because they
				 * are all zero.  But we might need to
				 * priority-boost blocked readers.
				 */
				rcu_initiate_boost(rnp, flags);
				/* rcu_initiate_boost() releases rnp->lock */
				continue;
			}
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
			if ((rnp->qsmask & bit) != 0) {
				if (f(per_cpu_ptr(&rcu_data, cpu)))
					mask |= bit;
			}
		}
		if (mask != 0) {
			/* Idle/offline CPUs, report (releases rnp->lock). */
			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		} else {
			/* Nothing to do here, so just drop the lock. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
void rcu_force_quiescent_state(void)
{
	unsigned long flags;
	bool ret;
	struct rcu_node *rnp;
	struct rcu_node *rnp_old = NULL;

	/* Funnel through hierarchy to reduce memory contention. */
	rnp = __this_cpu_read(rcu_data.mynode);
	for (; rnp != NULL; rnp = rnp->parent) {
		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
		      !raw_spin_trylock(&rnp->fqslock);
		if (rnp_old != NULL)
			raw_spin_unlock(&rnp_old->fqslock);
		if (ret)
			return;
		rnp_old = rnp;
	}
	/* rnp_old == rcu_get_root(), rnp == NULL. */

	/* Reached the root of the rcu_node tree, acquire lock. */
	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
	raw_spin_unlock(&rnp_old->fqslock);
	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
		return;  /* Someone beat us to it. */
	}
	WRITE_ONCE(rcu_state.gp_flags,
		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
	rcu_gp_kthread_wake();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
void
rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
			 const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/*
 * This does the RCU core processing work for the specified rcu_data
 * structures.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (cpu_is_offline(smp_processor_id()))
		return;
	trace_rcu_utilization(TPS("Start RCU core"));
	WARN_ON_ONCE(!rdp->beenonline);

	/* Report any deferred quiescent states if preemption enabled. */
	if (!(preempt_count() & PREEMPT_MASK)) {
		rcu_preempt_deferred_qs(current);
	} else if (rcu_preempt_need_deferred_qs(current)) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rdp);

	/* No grace period and unregistered callbacks? */
	if (!rcu_gp_in_progress() &&
	    rcu_segcblist_is_enabled(&rdp->cblist)) {
		local_irq_save(flags);
		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
			rcu_accelerate_cbs_unlocked(rnp, rdp);
		local_irq_restore(flags);
	}

	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());

	/* If there are callbacks ready, invoke them. */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		invoke_rcu_callbacks(rdp);

	/* Do any needed deferred wakeups of rcuo kthreads. */
	do_nocb_deferred_wakeup(rdp);
	trace_rcu_utilization(TPS("End RCU core"));
}

/*
 * Schedule RCU callback invocation.  If the running implementation of RCU
 * does not support RCU priority boosting, just do a direct call, otherwise
 * wake up the per-CPU kernel kthread.  Note that because we are running
 * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
 * cannot disappear out from under us.
 */
static void invoke_rcu_callbacks(struct rcu_data *rdp)
{
	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
		return;
	if (likely(!rcu_state.boost)) {
		rcu_do_batch(rdp);
		return;
	}
	invoke_rcu_callbacks_kthread();
}

static void invoke_rcu_core(void)
{
	if (cpu_online(smp_processor_id()))
		raise_softirq(RCU_SOFTIRQ);
}

/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
			    unsigned long flags)
{
	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
	if (!rcu_is_watching())
		invoke_rcu_core();

	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
		return;

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
		     rdp->qlen_last_fqs_check + qhimark)) {

		/* Are we ignoring a completed grace period? */
		note_gp_changes(rdp);

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress()) {
			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
				rcu_force_quiescent_state();
			rdp->n_force_qs_snap = rcu_state.n_force_qs;
			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
		}
	}
}

/*
 * RCU callback function to leak a callback.
 */
static void rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only rcu_barrier()
 * is expected to specify a CPU.
 */
static void
__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
{
	unsigned long flags;
	struct rcu_data *rdp;

	/* Misaligned rcu_head! */
	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));

	if (debug_rcu_head_queue(head)) {
		/*
		 * Probable double call_rcu(), so leak the callback.
		 * Use rcu:rcu_callback trace event to find the previous
		 * time callback was passed to __call_rcu().
		 */
		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
			  head, head->func);
		WRITE_ONCE(head->func, rcu_leak_callback);
		return;
	}
	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = this_cpu_ptr(&rcu_data);

	/* Add the callback to our list. */
	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
		int offline;

		if (cpu != -1)
			rdp = per_cpu_ptr(&rcu_data, cpu);
		if (likely(rdp->mynode)) {
			/* Post-boot, so this should be for a no-CBs CPU. */
			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
			WARN_ON_ONCE(offline);
			/* Offline CPU, _call_rcu() illegal, leak callback.  */
			local_irq_restore(flags);
			return;
		}
		/*
		 * Very early boot, before rcu_init().  Initialize if needed
		 * and then drop through to queue the callback.
		 */
		WARN_ON_ONCE(cpu != -1);
		WARN_ON_ONCE(!rcu_is_watching());
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
	}
	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
	if (__is_kfree_rcu_offset((unsigned long)func))
		trace_rcu_kfree_callback(rcu_state.name, head,
					 (unsigned long)func,
					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
					 rcu_segcblist_n_cbs(&rdp->cblist));
	else
		trace_rcu_callback(rcu_state.name, head,
				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
				   rcu_segcblist_n_cbs(&rdp->cblist));

	/* Go handle any RCU core processing required. */
	__call_rcu_core(rdp, head, flags);
	local_irq_restore(flags);
}

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
 * may be nested.  In addition, regions of code across which interrupts,
 * preemption, or softirqs have been disabled also serve as RCU read-side
 * critical sections.  This includes hardware interrupt handlers, softirq
 * handlers, and NMI handlers.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
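 *
 * As an illustrative sketch (not part of this file; "struct foo", its
 * "rh" field, and free_foo() are hypothetical), a typical caller embeds
 * an rcu_head in the protected structure and frees it from the callback:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void free_foo(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 * After unpublishing all reader-visible pointers to a given "p", the
 * updater then invokes call_rcu(&p->rh, free_foo).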
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
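
/*
 * Illustrative note (a sketch, with "struct foo" and "p" hypothetical):
 * callers normally do not invoke kfree_call_rcu() directly, but instead
 * use the kfree_rcu() wrapper, which encodes the offset of the rcu_head
 * within the enclosing structure as the "func" argument, for example:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kfree_rcu(p, rh);
 *
 * which frees "p" one grace period later.
 */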

/*
 * During early boot, any blocking grace-period wait automatically
 * implies a grace period.  Later on, this is never the case for PREEMPT.
 *
 * However, because a context switch is a grace period for !PREEMPT, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution of
 * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_PREEMPT))
		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
	might_sleep();  /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections.  This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu().  In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section.  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
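
/*
 * Illustrative usage sketch for synchronize_rcu(), for exposition only.
 * The global pointer gp and the update-side gp_lock are hypothetical:
 *
 *	void update_gp(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&gp_lock);
 *		old_fp = rcu_dereference_protected(gp,
 *				lockdep_is_held(&gp_lock));
 *		rcu_assign_pointer(gp, new_fp);
 *		spin_unlock(&gp_lock);
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 *
 * The synchronize_rcu() call guarantees that any reader that obtained
 * old_fp via rcu_dereference(gp) has finished before old_fp is freed.
 */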

/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gp_seq.
	 */
	smp_mb();  /* ^^^ */
	return rcu_seq_snap(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
		synchronize_rcu();
	else
		smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
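
/*
 * Illustrative pairing of get_state_synchronize_rcu() with
 * cond_synchronize_rcu(), for exposition only.  The do_other_work()
 * call stands in for arbitrary processing overlapped with the grace
 * period:
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();
 *	do_other_work();
 *	cond_synchronize_rcu(cookie);
 *
 * cond_synchronize_rcu() blocks only if a full grace period has not
 * already elapsed since the cookie was taken.
 */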

/*
 * Check to see if there is any immediate RCU-related work to be done by
 * the current CPU, returning 1 if so and zero otherwise.  The checks are
 * in order of increasing expense: checks that can be carried out against
 * CPU-local state are performed first.  However, we must check for CPU
 * stalls first, else we might not get a chance.
 */
static int rcu_pending(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rdp);

	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
	if (rcu_nohz_full_cpu())
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
		return 1;

	/* Does this CPU have callbacks ready to invoke? */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		return 1;

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (!rcu_gp_in_progress() &&
	    rcu_segcblist_is_enabled(&rdp->cblist) &&
	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return 1;

	/* Have RCU grace period completed or started?  */
	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
		return 1;

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp))
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Helper function for rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
{
	trace_rcu_barrier(rcu_state.name, s, cpu,
			  atomic_read(&rcu_state.barrier_cpu_count), done);
}

/*
 * RCU callback function for rcu_barrier().  If we are last, wake
 * up the task executing rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
		rcu_barrier_trace(TPS("LastCB"), -1,
				   rcu_state.barrier_sequence);
		complete(&rcu_state.barrier_completion);
	} else {
		rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
	}
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *unused)
{
	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);

	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
	rdp->barrier_head.func = rcu_barrier_callback;
	debug_rcu_head_queue(&rdp->barrier_head);
	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
		atomic_inc(&rcu_state.barrier_cpu_count);
	} else {
		debug_rcu_head_unqueue(&rdp->barrier_head);
		rcu_barrier_trace(TPS("IRQNQ"), -1,
				   rcu_state.barrier_sequence);
	}
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	int cpu;
	struct rcu_data *rdp;
	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);

	rcu_barrier_trace(TPS("Begin"), -1, s);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_state.barrier_mutex);

	/* Did someone else do our work for us? */
	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
		rcu_barrier_trace(TPS("EarlyExit"), -1,
				   rcu_state.barrier_sequence);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rcu_state.barrier_mutex);
		return;
	}

	/* Mark the start of the barrier operation. */
	rcu_seq_start(&rcu_state.barrier_sequence);
	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);

	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */
	init_completion(&rcu_state.barrier_completion);
	atomic_set(&rcu_state.barrier_cpu_count, 1);
	get_online_cpus();

	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
			continue;
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_is_nocb_cpu(cpu)) {
			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
				rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
						   rcu_state.barrier_sequence);
			} else {
				rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
						   rcu_state.barrier_sequence);
				smp_mb__before_atomic();
				atomic_inc(&rcu_state.barrier_cpu_count);
				__call_rcu(&rdp->barrier_head,
					   rcu_barrier_callback, cpu, 0);
			}
		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
			rcu_barrier_trace(TPS("OnlineQ"), cpu,
					   rcu_state.barrier_sequence);
			smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
		} else {
			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
					   rcu_state.barrier_sequence);
		}
	}
	put_online_cpus();

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
		complete(&rcu_state.barrier_completion);

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
	wait_for_completion(&rcu_state.barrier_completion);

	/* Mark the end of the barrier operation. */
	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
	rcu_seq_end(&rcu_state.barrier_sequence);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rcu_state.barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
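
/*
 * Illustrative use of rcu_barrier() in the usual module-unload pattern,
 * for exposition only; foo_exit() and foo_stop_queueing_callbacks() are
 * hypothetical:
 *
 *	void foo_exit(void)
 *	{
 *		foo_stop_queueing_callbacks();
 *		rcu_barrier();
 *	}
 *
 * Once foo_stop_queueing_callbacks() guarantees that no new call_rcu()
 * invocations reference the module, rcu_barrier() waits for all
 * already-queued callbacks to be invoked, after which the module's
 * callback functions can safely be unloaded.
 */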

/*
 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	long oldmask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	WARN_ON_ONCE(rnp->wait_blkd_tasks);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
		if (oldmask)
			return;
	}
}

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* Set up local state, ensuring consistent view of global state. */
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
	rdp->cpu = cpu;
	rcu_boot_init_nocb_percpu_data(rdp);
}

/*
 * Invoked early in the CPU-online process, when pretty much all services
 * are available.  The incoming CPU is not present.
 *
 * Initializes a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we can
 * accept some slop in the rsp->gp_seq access due to the fact that this
 * CPU cannot possibly have any RCU callbacks in flight yet.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rcu_get_root();

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rcu_state.n_force_qs;
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !init_nocb_callback_list(rdp))
		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	rdp->beenonline = true;	 /* We have now been online. */
	rdp->gp_seq = rnp->gp_seq;
	rdp->gp_seq_needed = rnp->gp_seq;
	rdp->cpu_no_qs.b.norm = true;
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_prepare_kthreads(cpu);
	rcu_spawn_cpu_nocb_kthread(cpu);

	return 0;
}

/*
 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask |= rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_online_cpu(cpu);
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);
	return 0;
}

/*
 * Near the beginning of the process.  The CPU is still very much alive
 * with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask &= ~rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	rcutree_affinity_setting(cpu, cpu);
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_offline_cpu(cpu);
	return 0;
}

static DEFINE_PER_CPU(int, rcu_cpu_started);

/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	int nbits;
	unsigned long oldmask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (per_cpu(rcu_cpu_started, cpu))
		return;

	per_cpu(rcu_cpu_started, cpu) = 1;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->qsmaskinitnext |= mask;
	oldmask = rnp->expmaskinitnext;
	rnp->expmaskinitnext |= mask;
	oldmask ^= rnp->expmaskinitnext;
	nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
	/* Allow lockless access for expedited grace periods. */
	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
		/* Report QS -after- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the rcu_node tree's ->qsmaskinitnext bit masks.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* QS for any half-done expedited grace period. */
	preempt_disable();
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
	preempt_enable();
	rcu_preempt_deferred_qs(current);

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock(&rcu_state.ofl_lock);
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
		/* Report quiescent state -before- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rnp->qsmaskinitnext &= ~mask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	raw_spin_unlock(&rcu_state.ofl_lock);

	per_cpu(rcu_cpu_started, cpu) = 0;
}

/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
 */
void rcutree_migrate_callbacks(int cpu)
{
	unsigned long flags;
	struct rcu_data *my_rdp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp_root = rcu_get_root();
	bool needwake;

	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */

	local_irq_save(flags);
	my_rdp = this_cpu_ptr(&rcu_data);
	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
		local_irq_restore(flags);
		return;
	}
	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	/* Leverage recent GPs and set GP for new callbacks. */
	needwake = rcu_advance_cbs(rnp_root, rdp) ||
		   rcu_advance_cbs(rnp_root, my_rdp);
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
	if (needwake)
		rcu_gp_kthread_wake();
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}
#endif

/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Spawn the kthreads that handle RCU's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	rnp = rcu_get_root();
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rcu_state.gp_kthread = t;
	if (kthread_prio) {
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}

/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i);
	}
}

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int rcu_capacity[RCU_NUM_LVLS];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	if (jiffies_till_sched_qs == ULONG_MAX)
		adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute number of nodes that can be handled by an rcu_node tree
	 * with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];

		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
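
/*
 * Worked example with illustrative numbers only: assuming RCU_FANOUT is
 * 64 (the usual 64-bit default), booting with rcu_fanout_leaf=16 on a
 * system with nr_cpu_ids=400 yields rcu_capacity[] = { 16, 1024, ... },
 * hence rcu_num_lvls = 2, num_rcu_lvl[] = { 1, 25 }, and
 * rcu_num_nodes = 26: one root rcu_node fanning out to 25 leaf rcu_node
 * structures covering at most 16 CPUs each.
 */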

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();
}

#include "tree_exp.h"
#include "tree_plugin.h"