Commit 404e0a8b authored by Eric Dumazet, committed by David S. Miller

net: ipv4: fix RCU races on dst refcounts

commit c6cffba4 (ipv4: Fix input route performance regression.)
added various fatal races with dst refcounts.

Crashes happen on TCP workloads if routes are added or deleted at the
same time.

The dst_free() calls from free_fib_info_rcu() are clearly racy:
receive-path readers hold these cached dsts without a reference, so a
reader can be taking a reference (dst_clone()) at the very moment
dst_free() observes a zero refcount and destroys the dst.
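
How the crash happens, roughly (a hypothetical interleaving
reconstructed from the description above, not a captured trace; input
dsts travel as noref pointers, so their refcount can legitimately be
zero while still in use under rcu_read_lock()):

    CPU0 (rx path, rcu_read_lock)          CPU1 (route deletion)
    -----------------------------          ---------------------
    dst = skb_dst(skb);  /* refcnt == 0 */
                                           free_fib_info_rcu()
                                             dst_free(&rt->dst)
                                               /* sees refcnt == 0 */
    sk->sk_rx_dst = dst_clone(dst);            dst_destroy(dst) /* freed */
    ...
    dst->ops->check(dst, 0);  /* use after free -> crash */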

We instead need regular dst refcounting (dst_release()) and must make
sure dst_release() is aware of RCU grace periods:

Add a DST_RCU_FREE flag so that dst_release() waits for an RCU grace
period before destroying a cached dst.

Introduce a new inet_sk_rx_dst_set() helper, using atomic_inc_not_zero()
to make sure we don't increase a zero refcount (on a dst currently
waiting out an RCU grace period before destruction).
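
The semantics of that primitive can be sketched in portable C11 (a
standalone illustration only; refcount_inc_not_zero() is a hypothetical
name used here, the kernel's atomic_inc_not_zero() behaves equivalently):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object is not already on its way
     * to destruction (refcount == 0). */
    static bool refcount_inc_not_zero(atomic_int *refcnt)
    {
            int old = atomic_load(refcnt);

            while (old != 0) {
                    /* on failure, 'old' is reloaded with the current value */
                    if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                            return true;  /* reference taken */
            }
            return false;  /* object is dying; caller must not touch it */
    }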

rt_cache_route() must take a reference on the new cached route, and
release it if it was not able to install it.
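
The ownership rule this enforces (the nexthop cache slot owns exactly
one reference; a losing cmpxchg must give the speculative reference
back) can be sketched in standalone C11, with hypothetical obj /
cache_publish names standing in for rtable / rt_cache_route():

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcnt;
    };

    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                    free(o);  /* last reference dropped */
    }

    /* Publish 'new' into a cache slot that owns one reference. */
    static void cache_publish(_Atomic(struct obj *) *slot, struct obj *new)
    {
            struct obj *old = atomic_load(slot);

            atomic_fetch_add(&new->refcnt, 1);  /* reference for the slot */
            if (atomic_compare_exchange_strong(slot, &old, new)) {
                    if (old)
                            obj_put(old);  /* drop the displaced entry's ref */
            } else {
                    obj_put(new);  /* lost the race: give the ref back */
            }
    }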

With this patch, my machines survive various benchmarks.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cca32e4b
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -61,6 +61,7 @@ struct dst_entry {
 #define DST_NOPEER		0x0040
 #define DST_FAKE_RTABLE		0x0080
 #define DST_XFRM_TUNNEL		0x0100
+#define DST_RCU_FREE		0x0200

 	unsigned short		pending_confirm;
@@ -382,12 +383,6 @@ static inline void dst_free(struct dst_entry *dst)
 	__dst_free(dst);
 }

-static inline void dst_rcu_free(struct rcu_head *head)
-{
-	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
-
-	dst_free(dst);
-}
-
 static inline void dst_confirm(struct dst_entry *dst)
 {
 	dst->pending_confirm = 1;
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,4 +249,17 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 	return flags;
 }

+static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (atomic_inc_not_zero(&dst->__refcnt)) {
+		if (!(dst->flags & DST_RCU_FREE))
+			dst->flags |= DST_RCU_FREE;
+
+		sk->sk_rx_dst = dst;
+		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	}
+}
+
 #endif	/* _INET_SOCK_H */
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -258,6 +258,15 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
 }
 EXPORT_SYMBOL(dst_destroy);

+static void dst_rcu_destroy(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -265,12 +274,16 @@ void dst_release(struct dst_entry *dst)
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
+		if (unlikely(dst->flags & (DST_NOCACHE | DST_RCU_FREE)) && !newrefcnt) {
+			if (dst->flags & DST_RCU_FREE) {
+				call_rcu_bh(&dst->rcu_head, dst_rcu_destroy);
+			} else {
+				dst = dst_destroy(dst);
+				if (dst)
+					__dst_free(dst);
+			}
 		}
 	}
 }
 EXPORT_SYMBOL(dst_release);
@@ -320,11 +333,14 @@ EXPORT_SYMBOL(__dst_destroy_metrics_generic);
  */
 void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
 {
+	bool hold;
+
 	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
 	/* If dst not in cache, we must take a reference, because
 	 * dst_release() will destroy dst as soon as its refcount becomes zero
 	 */
-	if (unlikely(dst->flags & DST_NOCACHE)) {
+	hold = (dst->flags & (DST_NOCACHE | DST_RCU_FREE)) == DST_NOCACHE;
+	if (unlikely(hold)) {
 		dst_hold(dst);
 		skb_dst_set(skb, dst);
 	} else {
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -184,6 +184,12 @@ static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
 	return dn_rt_hash_mask & (unsigned int)tmp;
 }

+static inline void dst_rcu_free(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst_free(dst);
+}
+
 static inline void dnrt_free(struct dn_route *rt)
 {
 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -172,9 +172,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
 		if (nexthop_nh->nh_exceptions)
 			free_nh_exceptions(nexthop_nh);
 		if (nexthop_nh->nh_rth_output)
-			dst_free(&nexthop_nh->nh_rth_output->dst);
+			dst_release(&nexthop_nh->nh_rth_output->dst);
 		if (nexthop_nh->nh_rth_input)
-			dst_free(&nexthop_nh->nh_rth_input->dst);
+			dst_release(&nexthop_nh->nh_rth_input->dst);
 	} endfor_nexthops(fi);

 	release_net(fi->fib_net);
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1199,11 +1199,6 @@ static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
 	fnhe->fnhe_stamp = jiffies;
 }

-static inline void rt_free(struct rtable *rt)
-{
-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 {
 	struct rtable *orig, *prev, **p = &nh->nh_rth_output;
@@ -1213,17 +1208,14 @@ static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	orig = *p;

+	rt->dst.flags |= DST_RCU_FREE;
+	dst_hold(&rt->dst);
 	prev = cmpxchg(p, orig, rt);
 	if (prev == orig) {
 		if (orig)
-			rt_free(orig);
+			dst_release(&orig->dst);
 	} else {
-		/* Routes we intend to cache in the FIB nexthop have
-		 * the DST_NOCACHE bit clear.  However, if we are
-		 * unsuccessful at storing this route into the cache
-		 * we really need to set it.
-		 */
-		rt->dst.flags |= DST_NOCACHE;
+		dst_release(&rt->dst);
 	}
 }
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5604,8 +5604,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	tcp_set_state(sk, TCP_ESTABLISHED);

 	if (skb != NULL) {
-		sk->sk_rx_dst = dst_clone(skb_dst(skb));
-		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+		inet_sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1617,19 +1617,19 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif

 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		sock_rps_save_rxhash(sk, skb);
-		if (sk->sk_rx_dst) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+		struct dst_entry *dst = sk->sk_rx_dst;
+
+		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
 			    dst->ops->check(dst, 0) == NULL) {
 				dst_release(dst);
 				sk->sk_rx_dst = NULL;
 			}
 		}
-		if (unlikely(sk->sk_rx_dst == NULL)) {
-			sk->sk_rx_dst = dst_clone(skb_dst(skb));
-			inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-		}
+		if (unlikely(sk->sk_rx_dst == NULL))
+			inet_sk_rx_dst_set(sk, skb);
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,8 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		struct tcp_sock *oldtp = tcp_sk(sk);
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

-		newsk->sk_rx_dst = dst_clone(skb_dst(skb));
-		inet_sk(newsk)->rx_dst_ifindex = skb->skb_iif;
+		inet_sk_rx_dst_set(newsk, skb);

 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection.  There is no need to