Commit 971af18b authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[IPV6]: Reuse inet_csk_get_port in tcp_v6_get_port

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 89cee8b1
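This change turns inet_csk_get_port() into a generic bind-port allocator that takes a per-address-family bind-conflict callback, so the IPv6 path can drop its private copy of the allocation loop and simply pass inet6_csk_bind_conflict. As a rough user-space sketch of that callback pattern (not the kernel code: struct sockctx, struct bind_bucket, demo_get_port and demo_v4_bind_conflict are invented stand-ins for the kernel structures), the allocator below only walks a table and defers the "may these two sockets share this port?" decision to the function pointer it is given:

/*
 * Minimal sketch of the pattern introduced by this commit: a generic
 * port allocator parameterized by a family-specific conflict check.
 * All names below are made up for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct sockctx {                        /* stand-in for struct sock */
        unsigned int addr;              /* local address, IPv4-style for the demo */
        bool reuse;                     /* like sk->sk_reuse */
};

struct bind_bucket {                    /* stand-in for struct inet_bind_bucket */
        unsigned short port;
        const struct sockctx *owner;    /* one owner keeps the demo short */
};

/* Family-specific conflict predicate, analogous to inet_csk_bind_conflict(). */
static int demo_v4_bind_conflict(const struct sockctx *sk,
                                 const struct bind_bucket *tb)
{
        /* Conflict when an existing owner bound the same local address
         * and at least one side does not allow address reuse. */
        return tb->owner && tb->owner->addr == sk->addr &&
               !(sk->reuse && tb->owner->reuse);
}

/* Generic allocator, analogous to the reworked inet_csk_get_port():
 * it walks the buckets and delegates the conflict decision. */
static int demo_get_port(struct bind_bucket *table, size_t n,
                         const struct sockctx *sk, unsigned short snum,
                         int (*bind_conflict)(const struct sockctx *,
                                              const struct bind_bucket *))
{
        for (size_t i = 0; i < n; i++) {
                if (table[i].port != snum)
                        continue;
                if (table[i].owner && bind_conflict(sk, &table[i]))
                        return 1;       /* port taken and sharing not allowed */
                table[i].owner = sk;    /* take (or share) the bucket */
                return 0;
        }
        return 1;                       /* no bucket for this port */
}

int main(void)
{
        struct bind_bucket table[] = { { .port = 80, .owner = NULL },
                                       { .port = 8080, .owner = NULL } };
        struct sockctx a = { .addr = 0x7f000001, .reuse = false };
        struct sockctx b = { .addr = 0x7f000001, .reuse = false };

        printf("first bind:  %d\n", demo_get_port(table, 2, &a, 80, demo_v4_bind_conflict));
        printf("second bind: %d\n", demo_get_port(table, 2, &b, 80, demo_v4_bind_conflict));
        return 0;
}

Built with a C99 compiler, the first bind to port 80 succeeds and the second reports a conflict. That is the division of labour the patch establishes: the allocation loop lives in one place, and each family (DCCP, TCP/IPv4, TCP/IPv6) contributes only its own conflict predicate.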
@@ -192,8 +192,12 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk,
                                                 const __u16 rport,
                                                 const __u32 raddr,
                                                 const __u32 laddr);
+extern int inet_csk_bind_conflict(const struct sock *sk,
+                                  const struct inet_bind_bucket *tb);
 extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
-                             struct sock *sk, unsigned short snum);
+                             struct sock *sk, unsigned short snum,
+                             int (*bind_conflict)(const struct sock *sk,
+                                                  const struct inet_bind_bucket *tb));
 
 extern struct dst_entry* inet_csk_route_req(struct sock *sk,
                                             const struct request_sock *req);
@@ -37,7 +37,8 @@ EXPORT_SYMBOL_GPL(dccp_hashinfo);
 
 static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
 {
-        return inet_csk_get_port(&dccp_hashinfo, sk, snum);
+        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
+                                 inet_csk_bind_conflict);
 }
 
 static void dccp_v4_hash(struct sock *sk)
@@ -37,7 +37,8 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
 
-static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
+int inet_csk_bind_conflict(const struct sock *sk,
+                           const struct inet_bind_bucket *tb)
 {
         const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
         struct sock *sk2;
@@ -62,11 +63,15 @@ static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
         return node != NULL;
 }
 
+EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
+
 /* Obtain a reference to a local port for the given sock,
  * if snum is zero it means select any available local port.
  */
 int inet_csk_get_port(struct inet_hashinfo *hashinfo,
-                      struct sock *sk, unsigned short snum)
+                      struct sock *sk, unsigned short snum,
+                      int (*bind_conflict)(const struct sock *sk,
+                                           const struct inet_bind_bucket *tb))
 {
         struct inet_bind_hashbucket *head;
         struct hlist_node *node;
@@ -125,7 +130,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
                         goto success;
                 } else {
                         ret = 1;
-                        if (inet_csk_bind_conflict(sk, tb))
+                        if (bind_conflict(sk, tb))
                                 goto fail_unlock;
                 }
         }
@@ -97,7 +97,8 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 {
-        return inet_csk_get_port(&tcp_hashinfo, sk, snum);
+        return inet_csk_get_port(&tcp_hashinfo, sk, snum,
+                                 inet_csk_bind_conflict);
 }
 
 static void tcp_v4_hash(struct sock *sk)
@@ -76,7 +76,7 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
 static struct tcp_func ipv6_mapped;
 static struct tcp_func ipv6_specific;
 
-static inline int tcp_v6_bind_conflict(const struct sock *sk,
-                                       const struct inet_bind_bucket *tb)
+int inet6_csk_bind_conflict(const struct sock *sk,
+                            const struct inet_bind_bucket *tb)
 {
         const struct sock *sk2;
@@ -97,97 +97,10 @@ static inline int tcp_v6_bind_conflict(const struct sock *sk,
         return node != NULL;
 }
 
-/* Grrr, addr_type already calculated by caller, but I don't want
- * to add some silly "cookie" argument to this method just for that.
- * But it doesn't matter, the recalculation is in the rarest path
- * this function ever takes.
- */
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
-        struct inet_bind_hashbucket *head;
-        struct inet_bind_bucket *tb;
-        struct hlist_node *node;
-        int ret;
-
-        local_bh_disable();
-        if (snum == 0) {
-                int low = sysctl_local_port_range[0];
-                int high = sysctl_local_port_range[1];
-                int remaining = (high - low) + 1;
-                int rover = net_random() % (high - low) + low;
-
-                do {
-                        head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
-                        spin_lock(&head->lock);
-                        inet_bind_bucket_for_each(tb, node, &head->chain)
-                                if (tb->port == rover)
-                                        goto next;
-                        break;
-                next:
-                        spin_unlock(&head->lock);
-                        if (++rover > high)
-                                rover = low;
-                } while (--remaining > 0);
-
-                /* Exhausted local port range during search? It is not
-                 * possible for us to be holding one of the bind hash
-                 * locks if this test triggers, because if 'remaining'
-                 * drops to zero, we broke out of the do/while loop at
-                 * the top level, not from the 'break;' statement.
-                 */
-                ret = 1;
-                if (unlikely(remaining <= 0))
-                        goto fail;
-
-                /* OK, here is the one we will use. */
-                snum = rover;
-        } else {
-                head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-                spin_lock(&head->lock);
-                inet_bind_bucket_for_each(tb, node, &head->chain)
-                        if (tb->port == snum)
-                                goto tb_found;
-        }
-        tb = NULL;
-        goto tb_not_found;
-tb_found:
-        if (tb && !hlist_empty(&tb->owners)) {
-                if (tb->fastreuse > 0 && sk->sk_reuse &&
-                    sk->sk_state != TCP_LISTEN) {
-                        goto success;
-                } else {
-                        ret = 1;
-                        if (tcp_v6_bind_conflict(sk, tb))
-                                goto fail_unlock;
-                }
-        }
-tb_not_found:
-        ret = 1;
-        if (tb == NULL) {
-                tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
-                if (tb == NULL)
-                        goto fail_unlock;
-        }
-        if (hlist_empty(&tb->owners)) {
-                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
-                        tb->fastreuse = 1;
-                else
-                        tb->fastreuse = 0;
-        } else if (tb->fastreuse &&
-                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
-                tb->fastreuse = 0;
-
-success:
-        if (!inet_csk(sk)->icsk_bind_hash)
-                inet_bind_hash(sk, tb, snum);
-        BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
-        ret = 0;
-
-fail_unlock:
-        spin_unlock(&head->lock);
-fail:
-        local_bh_enable();
-        return ret;
+        return inet_csk_get_port(&tcp_hashinfo, sk, snum,
+                                 inet6_csk_bind_conflict);
 }
 
 static __inline__ void __tcp_v6_hash(struct sock *sk)