Commit 37c72e56 authored by Paul E. McKenney, committed by Ingo Molnar

rcu: Prevent RCU IPI storms in presence of high call_rcu() load

As the number of callbacks on a given CPU rises, invoke
force_quiescent_state() at most once per qhimark additional
callbacks (qhimark defaults to 10,000), and even then only if no
other CPU has invoked force_quiescent_state() in the meantime.

This should fix the performance regression reported by Nick.
Reported-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
Cc: jens.axboe@oracle.com
LKML-Reference: <12555405592133-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2bc87203
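
The hysteresis scheme is easier to follow outside the kernel tree. The
standalone C sketch below models the enqueue-side check from __call_rcu()
under simplifying assumptions: a plain global counter stands in for
rsp->n_force_qs, struct cpu_data stands in for struct rcu_data, and
fake_force_quiescent_state() is a hypothetical stub for the real
IPI-sending machinery. It illustrates the technique only; it is not
kernel code.

	#include <stdio.h>

	#define QHIMARK 10000	/* enqueue-side trigger threshold (kernel default) */

	/* Stand-in for rsp->n_force_qs: how many forced scans have happened. */
	static unsigned long n_force_qs;

	/* Stand-in for the new struct rcu_data fields. */
	struct cpu_data {
		long qlen;			/* callbacks currently queued */
		long qlen_last_fqs_check;	/* qlen when forcing was last considered */
		unsigned long n_force_qs_snap;	/* n_force_qs seen at that time */
	};

	/* Hypothetical stub; the real function prods holdout CPUs. */
	static void fake_force_quiescent_state(void)
	{
		n_force_qs++;
	}

	/* Enqueue-side check, modeled on the new __call_rcu() logic.  (The
	 * kernel also skips forcing when the new callback is the only one
	 * pending, via the *rdp->nxttail[RCU_DONE_TAIL] != head test; that
	 * check is omitted here.) */
	static void enqueue_callback(struct cpu_data *cdp)
	{
		if (++cdp->qlen > cdp->qlen_last_fqs_check + QHIMARK) {
			/* Force only if no CPU forced since our last snapshot. */
			if (n_force_qs == cdp->n_force_qs_snap)
				fake_force_quiescent_state();
			cdp->n_force_qs_snap = n_force_qs;
			cdp->qlen_last_fqs_check = cdp->qlen;
		}
	}

	int main(void)
	{
		struct cpu_data cd = { 0, 0, 0 };
		int i;

		for (i = 0; i < 25000; i++)
			enqueue_callback(&cd);
		printf("forced scans: %lu\n", n_force_qs);	/* prints 2 */
		return 0;
	}

In this model, 25,000 back-to-back enqueues produce two forced scans.
Under the old check (++rdp->qlen > qhimark), every enqueue past the
threshold would have forced one, roughly 15,000 in all, which is the IPI
storm the commit eliminates.
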
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -958,7 +958,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -1011,6 +1011,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;

+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);

 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1224,7 +1231,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}

 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }

 /*
@@ -1288,10 +1295,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}

-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
@@ -1523,6 +1540,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -167,6 +167,10 @@ struct rcu_data {
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail[RCU_NEXT_SIZE];
 	long		qlen;		/* # of queued callbacks */
+	long		qlen_last_fqs_check;
+					/* qlen at last check for QS forcing */
+	unsigned long	n_force_qs_snap;
+					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */

 #ifdef CONFIG_NO_HZ
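
The two fields added to struct rcu_data above are re-armed as callbacks
drain. Continuing the earlier sketch (and reusing its struct cpu_data,
QHIMARK, and n_force_qs), the fragment below is a hypothetical model of
the drain-side reset that the rcu_do_batch() hunk performs, not the
kernel's implementation.

	/* Drain-side re-arming, modeled on the rcu_do_batch() hunk above. */
	static void after_batch(struct cpu_data *cdp, long invoked)
	{
		cdp->qlen -= invoked;
		if (cdp->qlen == 0 && cdp->qlen_last_fqs_check != 0) {
			/* Queue fully drained: reset the trigger and snapshot
			 * the global force count so the next burst starts clean. */
			cdp->qlen_last_fqs_check = 0;
			cdp->n_force_qs_snap = n_force_qs;
		} else if (cdp->qlen < cdp->qlen_last_fqs_check - QHIMARK) {
			/* Queue shrank far below the last trigger point: lower
			 * the bar so a renewed surge can force a scan sooner. */
			cdp->qlen_last_fqs_check = cdp->qlen;
		}
	}
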