Commit ff5c4f5c authored by Thomas Gleixner

rcu/tree: Mark the idle relevant functions noinstr

These functions are invoked from context tracking and other places in the
low-level entry code. Move them into the .noinstr.text section to exclude
them from instrumentation.

Mark the places where it is safe to invoke traceable functions with
instrumentation_begin()/instrumentation_end() so objtool won't complain.
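
For illustration only (not part of the patch), here is a minimal sketch of the
pattern being applied: the whole function lives in .noinstr.text, and the short
window in which instrumentable helpers may be called is bracketed with
instrumentation_begin()/instrumentation_end(). The names my_idle_enter() and
my_low_level_transition() are hypothetical; the real users are rcu_eqs_enter(),
rcu_eqs_exit() and the rcu_nmi_*() paths changed below. Header locations for
noinstr and instrumentation_begin/end() vary by kernel version.

/*
 * Hypothetical example of the noinstr + instrumentation_begin/end()
 * pattern; names are made up for illustration.
 */
#include <linux/compiler.h>     /* noinstr, instrumentation_begin/end() */
#include <linux/kernel.h>       /* trace_printk() */

/* Low-level state change that must stay instrumentation-free. */
static noinstr void my_low_level_transition(void)
{
        /* e.g. the actual dynticks/EQS counter update */
}

noinstr void my_idle_enter(void)
{
        /*
         * This function is placed in .noinstr.text, so tracers and
         * kprobes stay out of it and objtool verifies that it only
         * calls other noinstr code ...
         */
        instrumentation_begin();
        /*
         * ... except inside this window, where calling traceable
         * functions (tracepoints, lockdep, etc.) is known to be safe.
         */
        trace_printk("about to enter an extended quiescent state\n");
        instrumentation_end();

        /* Past this point no instrumentable calls are allowed. */
        my_low_level_transition();
}
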
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lkml.kernel.org/r/20200505134100.575356107@linutronix.de
parent 0d00449c
@@ -88,9 +88,6 @@
  */
 #define RCU_DYNTICK_CTRL_MASK 0x1
 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
         .dynticks_nesting = 1,
@@ -242,7 +239,7 @@ void rcu_softirq_qs(void)
  * RCU is watching prior to the call to this function and is no longer
  * watching upon return.
  */
-static void rcu_dynticks_eqs_enter(void)
+static noinstr void rcu_dynticks_eqs_enter(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
         int seq;
@@ -267,7 +264,7 @@ static void rcu_dynticks_eqs_enter(void)
  * called from an extended quiescent state, that is, RCU is not watching
  * prior to the call to this function and is watching upon return.
  */
-static void rcu_dynticks_eqs_exit(void)
+static noinstr void rcu_dynticks_eqs_exit(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
         int seq;
@@ -285,8 +282,6 @@ static void rcu_dynticks_eqs_exit(void)
         if (seq & RCU_DYNTICK_CTRL_MASK) {
                 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                 smp_mb__after_atomic(); /* _exit after clearing mask. */
-                /* Prefer duplicate flushes to losing a flush. */
-                rcu_eqs_special_exit();
         }
 }
@@ -314,7 +309,7 @@ static void rcu_dynticks_eqs_online(void)
  *
  * No ordering, as we are sampling CPU-local information.
  */
-static bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -603,7 +598,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  * the possibility of usermode upcalls having messed up our count
  * of interrupt nesting level during the prior busy period.
  */
-static void rcu_eqs_enter(bool user)
+static noinstr void rcu_eqs_enter(bool user)
 {
         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -618,12 +613,14 @@ static void rcu_eqs_enter(bool user)
         }
 
         lockdep_assert_irqs_disabled();
+        instrumentation_begin();
         trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         rdp = this_cpu_ptr(&rcu_data);
         do_nocb_deferred_wakeup(rdp);
         rcu_prepare_for_idle();
         rcu_preempt_deferred_qs(current);
+        instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
         // RCU is watching here ...
         rcu_dynticks_eqs_enter();
@@ -660,7 +657,7 @@ void rcu_idle_enter(void)
  * If you add or remove a call to rcu_user_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_enter(void)
+noinstr void rcu_user_enter(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_eqs_enter(true);
@@ -693,19 +690,23 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
          * leave it in non-RCU-idle state.
          */
         if (rdp->dynticks_nmi_nesting != 1) {
+                instrumentation_begin();
                 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
                                   atomic_read(&rdp->dynticks));
                 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                            rdp->dynticks_nmi_nesting - 2);
+                instrumentation_end();
                 return;
         }
 
+        instrumentation_begin();
         /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
         trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
         WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
         if (irq)
                 rcu_prepare_for_idle();
+        instrumentation_end();
 
         // RCU is watching here ...
         rcu_dynticks_eqs_enter();
@@ -721,7 +722,7 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
  * If you add or remove a call to rcu_nmi_exit(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_nmi_exit(void)
+void noinstr rcu_nmi_exit(void)
 {
         rcu_nmi_exit_common(false);
 }
@@ -745,7 +746,7 @@ void rcu_nmi_exit(void)
  * If you add or remove a call to rcu_irq_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_exit(void)
+void noinstr rcu_irq_exit(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_nmi_exit_common(true);
@@ -774,7 +775,7 @@ void rcu_irq_exit_irqson(void)
  * allow for the possibility of usermode upcalls messing up our count of
  * interrupt nesting level during the busy period that is just now starting.
  */
-static void rcu_eqs_exit(bool user)
+static void noinstr rcu_eqs_exit(bool user)
 {
         struct rcu_data *rdp;
         long oldval;
@@ -792,12 +793,14 @@ static void rcu_eqs_exit(bool user)
         // RCU is not watching here ...
         rcu_dynticks_eqs_exit();
         // ... but is watching here.
+        instrumentation_begin();
         rcu_cleanup_after_idle();
         trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
         WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
         WRITE_ONCE(rdp->dynticks_nesting, 1);
         WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
         WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+        instrumentation_end();
 }
 
 /**
@@ -828,7 +831,7 @@ void rcu_idle_exit(void)
  * If you add or remove a call to rcu_user_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_exit(void)
+void noinstr rcu_user_exit(void)
 {
         rcu_eqs_exit(1);
 }
@@ -876,28 +879,35 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
                         rcu_cleanup_after_idle();
 
                 incby = 1;
-        } else if (irq && tick_nohz_full_cpu(rdp->cpu) &&
-                   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-                   READ_ONCE(rdp->rcu_urgent_qs) &&
-                   !READ_ONCE(rdp->rcu_forced_tick)) {
-                // We get here only if we had already exited the extended
-                // quiescent state and this was an interrupt (not an NMI).
-                // Therefore, (1) RCU is already watching and (2) The fact
-                // that we are in an interrupt handler and that the rcu_node
-                // lock is an irq-disabled lock prevents self-deadlock.
-                // So we can safely recheck under the lock.
-                raw_spin_lock_rcu_node(rdp->mynode);
-                if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-                        // A nohz_full CPU is in the kernel and RCU
-                        // needs a quiescent state. Turn on the tick!
-                        WRITE_ONCE(rdp->rcu_forced_tick, true);
-                        tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-                }
-                raw_spin_unlock_rcu_node(rdp->mynode);
+        } else if (irq) {
+                instrumentation_begin();
+                if (tick_nohz_full_cpu(rdp->cpu) &&
+                    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
+                    READ_ONCE(rdp->rcu_urgent_qs) &&
+                    !READ_ONCE(rdp->rcu_forced_tick)) {
+                        // We get here only if we had already exited the
+                        // extended quiescent state and this was an
+                        // interrupt (not an NMI). Therefore, (1) RCU is
+                        // already watching and (2) The fact that we are in
+                        // an interrupt handler and that the rcu_node lock
+                        // is an irq-disabled lock prevents self-deadlock.
+                        // So we can safely recheck under the lock.
+                        raw_spin_lock_rcu_node(rdp->mynode);
+                        if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+                                // A nohz_full CPU is in the kernel and RCU
+                                // needs a quiescent state. Turn on the tick!
+                                WRITE_ONCE(rdp->rcu_forced_tick, true);
+                                tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+                        }
+                        raw_spin_unlock_rcu_node(rdp->mynode);
+                }
+                instrumentation_end();
         }
+
+        instrumentation_begin();
         trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                           rdp->dynticks_nmi_nesting,
                           rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+        instrumentation_end();
         WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
                    rdp->dynticks_nmi_nesting + incby);
         barrier();
@@ -906,11 +916,10 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  */
-void rcu_nmi_enter(void)
+noinstr void rcu_nmi_enter(void)
 {
         rcu_nmi_enter_common(false);
 }
-NOKPROBE_SYMBOL(rcu_nmi_enter);
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -934,7 +943,7 @@ NOKPROBE_SYMBOL(rcu_nmi_enter);
  * If you add or remove a call to rcu_irq_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_enter(void)
+noinstr void rcu_irq_enter(void)
 {
         lockdep_assert_irqs_disabled();
         rcu_nmi_enter_common(true);
@@ -979,7 +988,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
  * if the current CPU is not in its idle loop or is in an interrupt or
  * NMI handler, return true.
  */
-bool notrace rcu_is_watching(void)
+bool rcu_is_watching(void)
 {
         bool ret;
@@ -1031,12 +1040,12 @@ bool rcu_lockdep_current_cpu_online(void)
         if (in_nmi() || !rcu_scheduler_fully_active)
                 return true;
 
-        preempt_disable();
+        preempt_disable_notrace();
         rdp = this_cpu_ptr(&rcu_data);
         rnp = rdp->mynode;
         if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
                 ret = true;
-        preempt_enable();
+        preempt_enable_notrace();
         return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
...
@@ -2539,7 +2539,7 @@ static void rcu_bind_gp_kthread(void)
 }
 
 /* Record the current task on dyntick-idle entry. */
-static void rcu_dynticks_task_enter(void)
+static void noinstr rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
@@ -2547,7 +2547,7 @@ static void rcu_dynticks_task_enter(void)
 }
 
 /* Record no current task on dyntick-idle exit. */
-static void rcu_dynticks_task_exit(void)
+static void noinstr rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
...
@@ -284,13 +284,12 @@ struct lockdep_map rcu_callback_map =
         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
 EXPORT_SYMBOL_GPL(rcu_callback_map);
 
-int notrace debug_lockdep_rcu_enabled(void)
+noinstr int notrace debug_lockdep_rcu_enabled(void)
 {
         return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
                current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
-NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 
 /**
  * rcu_read_lock_held() - might we be in RCU read-side critical section?
...