Commit ff74e23f authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: md5: input path is run under rcu protected sections

It is guaranteed that both tcp_v4_rcv() and tcp_v6_rcv()
run from RCU read-locked sections:

ip_local_deliver_finish() and ip6_input_finish() both
use rcu_read_lock()

Also align tcp_v6_inbound_md5_hash() on tcp_v4_inbound_md5_hash()
by returning a boolean.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0980c1e3
...@@ -1153,8 +1153,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, ...@@ -1153,8 +1153,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
} }
EXPORT_SYMBOL(tcp_v4_md5_hash_skb); EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool __tcp_v4_inbound_md5_hash(struct sock *sk, /* Called with rcu_read_lock() */
const struct sk_buff *skb) static bool tcp_v4_inbound_md5_hash(struct sock *sk,
const struct sk_buff *skb)
{ {
/* /*
* This gets called for each TCP segment that arrives * This gets called for each TCP segment that arrives
...@@ -1206,18 +1207,6 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk, ...@@ -1206,18 +1207,6 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
} }
return false; return false;
} }
/*
 * RCU-locking wrapper around __tcp_v4_inbound_md5_hash(): the MD5 key
 * lookup inside requires rcu_read_lock(), so take it for the duration
 * of the check and return the verdict unchanged.
 */
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool drop;

	rcu_read_lock();
	drop = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return drop;
}
#endif #endif
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener, static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
......
...@@ -633,8 +633,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, ...@@ -633,8 +633,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
return 1; return 1;
} }
static int __tcp_v6_inbound_md5_hash(struct sock *sk, static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
const struct sk_buff *skb)
{ {
const __u8 *hash_location = NULL; const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected; struct tcp_md5sig_key *hash_expected;
...@@ -648,16 +647,16 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk, ...@@ -648,16 +647,16 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
/* We've parsed the options - do we have a hash? */ /* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location) if (!hash_expected && !hash_location)
return 0; return false;
if (hash_expected && !hash_location) { if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return 1; return true;
} }
if (!hash_expected && hash_location) { if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return 1; return true;
} }
/* check the signature */ /* check the signature */
...@@ -670,22 +669,10 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk, ...@@ -670,22 +669,10 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
genhash ? "failed" : "mismatch", genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source), &ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest)); &ip6h->daddr, ntohs(th->dest));
return 1; return true;
} }
return 0; return false;
}
/*
 * RCU-locking wrapper around __tcp_v6_inbound_md5_hash(): the MD5 key
 * lookup inside requires rcu_read_lock(), so take it for the duration
 * of the check and return the verdict unchanged.
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int verdict;

	rcu_read_lock();
	verdict = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return verdict;
}
#endif #endif
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment