Commit 3c779dfe authored by Paul E. McKenney

rcu: Eliminate callback-invocation/invocation use of rsp

Now that there is only one rcu_state structure, there is less point in
maintaining a pointer to it.  This commit therefore replaces rsp with
&rcu_state in rcu_do_batch(), invoke_rcu_callbacks(), and __call_rcu().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 9cbc5b97
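
Not part of the commit itself: the sketch below is a minimal, self-contained illustration of the pattern being applied, namely dropping a function-local pointer to a singleton structure and referencing the global directly. The struct layout, field values, and function names are simplified stand-ins for illustration, not the kernel's actual definitions.

/* Illustrative sketch only; simplified stand-ins, not kernel code. */
#include <stdio.h>

struct rcu_state {
	const char *name;
	unsigned long n_force_qs;
};

/* With a single global instance, a local pointer to it buys nothing. */
static struct rcu_state rcu_state = { .name = "rcu_sched", .n_force_qs = 0 };

/* Before: indirect through a function-local rsp pointer. */
static void do_batch_old(void)
{
	struct rcu_state *rsp = &rcu_state;

	printf("%s: n_force_qs=%lu\n", rsp->name, rsp->n_force_qs);
}

/* After: reference the lone rcu_state directly, as the commit does. */
static void do_batch_new(void)
{
	printf("%s: n_force_qs=%lu\n", rcu_state.name, rcu_state.n_force_qs);
}

int main(void)
{
	do_batch_old();
	do_batch_new();
	return 0;
}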
@@ -2467,14 +2467,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count;
-	struct rcu_state *rsp = &rcu_state;
 
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
-		trace_rcu_batch_start(rsp->name,
+		trace_rcu_batch_start(rcu_state.name,
 				      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
-		trace_rcu_batch_end(rsp->name, 0,
+		trace_rcu_batch_end(rcu_state.name, 0,
 				    !rcu_segcblist_empty(&rdp->cblist),
 				    need_resched(), is_idle_task(current),
 				    rcu_is_callbacks_kthread());
@@ -2489,7 +2488,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	local_irq_save(flags);
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	bl = rdp->blimit;
-	trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+	trace_rcu_batch_start(rcu_state.name,
+			      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
 	local_irq_restore(flags);
@@ -2498,7 +2498,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	rhp = rcu_cblist_dequeue(&rcl);
 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
 		debug_rcu_head_unqueue(rhp);
-		if (__rcu_reclaim(rsp->name, rhp))
+		if (__rcu_reclaim(rcu_state.name, rhp))
 			rcu_cblist_dequeued_lazy(&rcl);
 		/*
 		 * Stop only if limit reached and CPU has something to do.
@@ -2512,7 +2512,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	local_irq_save(flags);
 	count = -rcl.len;
-	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
+	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
 			    is_idle_task(current), rcu_is_callbacks_kthread());
 
 	/* Update counts and requeue any remaining callbacks. */
@@ -2528,7 +2528,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
 		rdp->qlen_last_fqs_check = 0;
-		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->n_force_qs_snap = rcu_state.n_force_qs;
 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
 		rdp->qlen_last_fqs_check = count;
@@ -2764,11 +2764,9 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
-	if (likely(!rsp->boost)) {
+	if (likely(!rcu_state.boost)) {
 		rcu_do_batch(rdp);
 		return;
 	}
@@ -2844,7 +2842,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 {
 	unsigned long flags;
 	struct rcu_data *rdp;
-	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	/* Misaligned rcu_head! */
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2893,11 +2890,12 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 	rcu_idle_count_callbacks_posted();
 
 	if (__is_kfree_rcu_offset((unsigned long)func))
-		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+		trace_rcu_kfree_callback(rcu_state.name, head,
+					 (unsigned long)func,
 					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 					 rcu_segcblist_n_cbs(&rdp->cblist));
 	else
-		trace_rcu_callback(rsp->name, head,
+		trace_rcu_callback(rcu_state.name, head,
 				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 				   rcu_segcblist_n_cbs(&rdp->cblist));