Commit a38e5e23 authored by Simon Horman, committed by Pablo Neira Ayuso

ipvs: use cond_resched_rcu() helper when walking connections

This avoids the situation where walking a large number of connections
can prevent scheduling for a long time, while also avoiding excessive
calls to rcu_read_unlock() and rcu_read_lock().

Note that in the case of !CONFIG_PREEMPT_RCU this will
add a call to cond_resched().
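
For reference, a minimal sketch of the cond_resched_rcu() helper this patch
relies on (added separately, by the parent commit f6f3c437); it is roughly
defined as below, which is where the !CONFIG_PREEMPT_RCU note above comes from:

	static inline void cond_resched_rcu(void)
	{
	#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
		/* Briefly leave the RCU read-side critical section so the
		 * scheduler gets a chance to run, then re-enter it before
		 * the caller continues walking the table.
		 */
		rcu_read_unlock();
		cond_resched();
		rcu_read_lock();
	#endif
	}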
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent f6f3c437
@@ -975,8 +975,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 				return cp;
 			}
 		}
-		rcu_read_unlock();
-		rcu_read_lock();
+		cond_resched_rcu();
 	}
 
 	return NULL;
@@ -1015,8 +1014,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
-		rcu_read_unlock();
-		rcu_read_lock();
+		cond_resched_rcu();
 	}
 	iter->l = NULL;
 	return NULL;
@@ -1206,17 +1204,13 @@ void ip_vs_random_dropentry(struct net *net)
 	int idx;
 	struct ip_vs_conn *cp, *cp_c;
 
+	rcu_read_lock();
 	/*
 	 * Randomly scan 1/32 of the whole table every second
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
 		unsigned int hash = net_random() & ip_vs_conn_tab_mask;
 
-		/*
-		 *  Lock is actually needed in this loop.
-		 */
-		rcu_read_lock();
-
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
@@ -1252,8 +1246,9 @@ void ip_vs_random_dropentry(struct net *net)
 				__ip_vs_conn_put(cp);
 			}
 		}
-		rcu_read_unlock();
+		cond_resched_rcu();
 	}
+	rcu_read_unlock();
 }
@@ -1267,11 +1262,8 @@ static void ip_vs_conn_flush(struct net *net)
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 flush_again:
-	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-		/*
-		 *  Lock is actually needed in this loop.
-		 */
-		rcu_read_lock();
+	rcu_read_lock();
+	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
 
 			if (!ip_vs_conn_net_eq(cp, net))
@@ -1286,8 +1278,9 @@ static void ip_vs_conn_flush(struct net *net)
 				__ip_vs_conn_put(cp);
 			}
 		}
-		rcu_read_unlock();
+		cond_resched_rcu();
 	}
+	rcu_read_unlock();
 
 	/* the counter may be not NULL, because maybe some conn entries
 	   are run by slow timer handler or unhashed but still referred */