Commit c29a0bc4 authored by Pavel Emelyanov, committed by David S. Miller

[SOCK][NETNS]: Add a struct net argument to sock_prot_inuse_add and _get.

This counter is about to become per-proto-and-per-net, so we'll need 
two arguments to determine which cell in this "table" to work with.

All the places except proc already pass the proper net to it; proc will be
tuned a bit later.

Some indentation with spaces is done in the proc files to keep their
coding style consistent.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8efa6e93
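
As a rough illustration of the direction the commit message describes (the counter becoming per-proto-and-per-net, so that a (net, proto) pair selects one cell of a "table"), here is a minimal, standalone C sketch. It is not part of this patch or of the kernel API: the names and sizes in it (MAX_NETS, sketch_inuse_add, the inuse_idx fields on the stand-in structs) are hypothetical and exist only to show why both arguments are needed.

/* Standalone sketch, not kernel code. */
#include <stdio.h>

#define MAX_NETS    4   /* hypothetical number of network namespaces */
#define MAX_PROTOS  8   /* hypothetical number of registered protocols */

struct net   { int inuse_idx; };   /* stand-in for struct net */
struct proto { int inuse_idx; };   /* stand-in for struct proto */

/* the per-net, per-proto "table" of in-use counters */
static int inuse[MAX_NETS][MAX_PROTOS];

static void sketch_inuse_add(struct net *net, struct proto *prot, int val)
{
	/* both arguments are needed to pick the right cell */
	inuse[net->inuse_idx][prot->inuse_idx] += val;
}

static int sketch_inuse_get(struct net *net, struct proto *prot)
{
	return inuse[net->inuse_idx][prot->inuse_idx];
}

int main(void)
{
	struct net init_net = { .inuse_idx = 0 };
	struct proto tcp    = { .inuse_idx = 1 };

	sketch_inuse_add(&init_net, &tcp, 1);   /* e.g. a socket was hashed */
	printf("TCP: inuse %d\n", sketch_inuse_get(&init_net, &tcp));
	return 0;
}
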
@@ -635,10 +635,11 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct proto *proto);
+extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
-static void inline sock_prot_inuse_add(struct proto *prot, int inc)
+static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
+		int inc)
 {
 }
 #endif
...
@@ -115,7 +115,7 @@ static inline void udp_lib_unhash(struct sock *sk)
 	write_lock_bh(&udp_hash_lock);
 	if (sk_del_node_init(sk)) {
 		inet_sk(sk)->num = 0;
-		sock_prot_inuse_add(sk->sk_prot, -1);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	}
 	write_unlock_bh(&udp_hash_lock);
 }
...
@@ -1949,13 +1949,13 @@ struct prot_inuse {
 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
-void sock_prot_inuse_add(struct proto *prot, int val)
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
 {
 	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
-int sock_prot_inuse_get(struct proto *prot)
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
 {
 	int cpu, idx = prot->inuse_idx;
 	int res = 0;
...
@@ -288,7 +288,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	sk->sk_hash = hash;
 	BUG_TRAP(sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
 	if (twp) {
@@ -332,7 +332,7 @@ void __inet_hash_nolisten(struct sock *sk)
 	write_lock(lock);
 	__sk_add_node(sk, list);
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
@@ -354,7 +354,7 @@ static void __inet_hash(struct sock *sk)
 	inet_listen_wlock(hashinfo);
 	__sk_add_node(sk, list);
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
 	wake_up(&hashinfo->lhash_wait);
 }
@@ -387,7 +387,7 @@ void inet_unhash(struct sock *sk)
 	}
 	if (__sk_del_node_init(sk))
-		sock_prot_inuse_add(sk->sk_prot, -1);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	write_unlock_bh(lock);
 out:
 	if (sk->sk_state == TCP_LISTEN)
...
@@ -91,7 +91,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	/* Step 2: Remove SK from established hash. */
 	if (__sk_del_node_init(sk))
-		sock_prot_inuse_add(sk->sk_prot, -1);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	/* Step 3: Hash TW into TIMEWAIT chain. */
 	inet_twsk_add_node(tw, &ehead->twchain);
...
@@ -53,14 +53,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
-		   sock_prot_inuse_get(&tcp_prot),
+		   sock_prot_inuse_get(&init_net, &tcp_prot),
 		   atomic_read(&tcp_orphan_count),
 		   tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
 		   atomic_read(&tcp_memory_allocated));
-	seq_printf(seq, "UDP: inuse %d mem %d\n", sock_prot_inuse_get(&udp_prot),
+	seq_printf(seq, "UDP: inuse %d mem %d\n",
+		   sock_prot_inuse_get(&init_net, &udp_prot),
 		   atomic_read(&udp_memory_allocated));
-	seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(&udplite_prot));
-	seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(&raw_prot));
+	seq_printf(seq, "UDPLITE: inuse %d\n",
+		   sock_prot_inuse_get(&init_net, &udplite_prot));
+	seq_printf(seq, "RAW: inuse %d\n",
+		   sock_prot_inuse_get(&init_net, &raw_prot));
 	seq_printf(seq, "FRAG: inuse %d memory %d\n",
 		   ip_frag_nqueues(&init_net), ip_frag_mem(&init_net));
 	return 0;
...
@@ -93,7 +93,7 @@ void raw_hash_sk(struct sock *sk)
 	write_lock_bh(&h->lock);
 	sk_add_node(sk, head);
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock_bh(&h->lock);
 }
 EXPORT_SYMBOL_GPL(raw_hash_sk);
@@ -104,7 +104,7 @@ void raw_unhash_sk(struct sock *sk)
 	write_lock_bh(&h->lock);
 	if (sk_del_node_init(sk))
-		sock_prot_inuse_add(sk->sk_prot, -1);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	write_unlock_bh(&h->lock);
 }
 EXPORT_SYMBOL_GPL(raw_unhash_sk);
...
@@ -231,7 +231,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 	if (sk_unhashed(sk)) {
 		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
 		sk_add_node(sk, head);
-		sock_prot_inuse_add(sk->sk_prot, 1);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	}
 	error = 0;
 fail:
...
@@ -43,7 +43,7 @@ void __inet6_hash(struct sock *sk)
 	}
 	__sk_add_node(sk, list);
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
 }
 EXPORT_SYMBOL(__inet6_hash);
@@ -204,7 +204,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	BUG_TRAP(sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sk->sk_hash = hash;
-	sock_prot_inuse_add(sk->sk_prot, 1);
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
 	if (twp != NULL) {
...
@@ -155,10 +155,11 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		if (sk->sk_protocol == IPPROTO_TCP) {
 			struct inet_connection_sock *icsk = inet_csk(sk);
+			struct net *net = sock_net(sk);
 			local_bh_disable();
-			sock_prot_inuse_add(sk->sk_prot, -1);
-			sock_prot_inuse_add(&tcp_prot, 1);
+			sock_prot_inuse_add(net, sk->sk_prot, -1);
+			sock_prot_inuse_add(net, &tcp_prot, 1);
 			local_bh_enable();
 			sk->sk_prot = &tcp_prot;
 			icsk->icsk_af_ops = &ipv4_specific;
@@ -167,12 +168,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 		} else {
 			struct proto *prot = &udp_prot;
+			struct net *net = sock_net(sk);
 			if (sk->sk_protocol == IPPROTO_UDPLITE)
 				prot = &udplite_prot;
 			local_bh_disable();
-			sock_prot_inuse_add(sk->sk_prot, -1);
-			sock_prot_inuse_add(prot, 1);
+			sock_prot_inuse_add(net, sk->sk_prot, -1);
+			sock_prot_inuse_add(net, prot, 1);
 			local_bh_enable();
 			sk->sk_prot = prot;
 			sk->sk_socket->ops = &inet_dgram_ops;
...
@@ -36,13 +36,13 @@ static struct proc_dir_entry *proc_net_devsnmp6;
 static int sockstat6_seq_show(struct seq_file *seq, void *v)
 {
 	seq_printf(seq, "TCP6: inuse %d\n",
-		   sock_prot_inuse_get(&tcpv6_prot));
+		   sock_prot_inuse_get(&init_net, &tcpv6_prot));
 	seq_printf(seq, "UDP6: inuse %d\n",
-		   sock_prot_inuse_get(&udpv6_prot));
+		   sock_prot_inuse_get(&init_net, &udpv6_prot));
 	seq_printf(seq, "UDPLITE6: inuse %d\n",
-		   sock_prot_inuse_get(&udplitev6_prot));
+		   sock_prot_inuse_get(&init_net, &udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
-		   sock_prot_inuse_get(&rawv6_prot));
+		   sock_prot_inuse_get(&init_net, &rawv6_prot));
 	seq_printf(seq, "FRAG6: inuse %d memory %d\n",
 		   ip6_frag_nqueues(&init_net), ip6_frag_mem(&init_net));
 	return 0;
...