Commit b2597f78 authored by David S. Miller

Merge branch 'replace-tcp_set_state-tracepoint-with-inet_sock_set_state'

Yafang Shao says:

====================
replace tcp_set_state tracepoint with inet_sock_set_state

According to the discussion in the mail thread
https://patchwork.kernel.org/patch/10099243/,
the tcp_set_state tracepoint is renamed to inet_sock_set_state and
moved to include/trace/events/sock.h.

With this new tracepoint, we can trace AF_INET/AF_INET6 sock state transitions.
As there is only a single tracepoint for inet, I did not create a new trace
file named trace/events/inet_sock.h; the tracepoint is placed in
include/trace/events/sock.h instead.

Currently TCP/DCCP/SCTP state transitions are traced with this tracepoint.

- Why not more protocols?
If another protocol really needs to be traced, I will modify the code to
trace it. I just want to keep the code simple and avoid emitting useless
information.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9ee1942c cbabf463
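
For illustration (all values hypothetical), a client socket completing a TCP
handshake would render through the TP_printk() format added below roughly as:

  sock:inet_sock_set_state: protocol=IPPROTO_TCP sport=36456 dport=80 saddr=10.0.0.5 daddr=10.0.0.9 saddrv6=::ffff:10.0.0.5 daddrv6=::ffff:10.0.0.9 oldstate=TCP_SYN_SENT newstate=TCP_ESTABLISHED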
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 int inet_sk_rebuild_header(struct sock *sk);
 
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+        /* state change might impact lockless readers. */
+        return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
 static inline unsigned int __inet_ehashfn(const __be32 laddr,
                                           const __u16 lport,
                                           const __be32 faddr,
......
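
As a sketch of how the new pair is meant to be used (hypothetical caller, not
part of this series): the acquire load in inet_sk_state_load() pairs with the
release store in inet_sk_state_store(), so a lockless reader that observes the
new state also observes every write the writer made before changing it.

        /* Hypothetical lockless reader, modelled on tcp_poll(): samples
         * sk_state without taking the socket lock.
         */
        static bool sk_is_listening(const struct sock *sk)
        {
                return inet_sk_state_load(sk) == TCP_LISTEN;
        }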
@@ -2333,31 +2333,6 @@ static inline bool sk_listener(const struct sock *sk)
         return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
-        return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
-        smp_store_release(&sk->sk_state, newstate);
-}
-
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
......
@@ -6,7 +6,50 @@
 #define _TRACE_SOCK_H
 
 #include <net/sock.h>
+#include <net/ipv6.h>
 #include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+/* The protocols traced by inet_sock_set_state */
+#define inet_protocol_names             \
+                EM(IPPROTO_TCP)         \
+                EM(IPPROTO_DCCP)        \
+                EMe(IPPROTO_SCTP)
+
+#define tcp_state_names                 \
+                EM(TCP_ESTABLISHED)     \
+                EM(TCP_SYN_SENT)        \
+                EM(TCP_SYN_RECV)        \
+                EM(TCP_FIN_WAIT1)       \
+                EM(TCP_FIN_WAIT2)       \
+                EM(TCP_TIME_WAIT)       \
+                EM(TCP_CLOSE)           \
+                EM(TCP_CLOSE_WAIT)      \
+                EM(TCP_LAST_ACK)        \
+                EM(TCP_LISTEN)          \
+                EM(TCP_CLOSING)         \
+                EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a)       TRACE_DEFINE_ENUM(a);
+#define EMe(a)      TRACE_DEFINE_ENUM(a);
+
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a)       { a, #a },
+#define EMe(a)      { a, #a }
+
+#define show_inet_protocol_name(val)    \
+        __print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val)        \
+        __print_symbolic(val, tcp_state_names)
 
 TRACE_EVENT(sock_rcvqueue_full,
@@ -63,6 +106,70 @@ TRACE_EVENT(sock_exceed_buf_limit,
                 __entry->rmem_alloc)
 );
 
+TRACE_EVENT(inet_sock_set_state,
+
+        TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+        TP_ARGS(sk, oldstate, newstate),
+
+        TP_STRUCT__entry(
+                __field(const void *, skaddr)
+                __field(int, oldstate)
+                __field(int, newstate)
+                __field(__u16, sport)
+                __field(__u16, dport)
+                __field(__u8, protocol)
+                __array(__u8, saddr, 4)
+                __array(__u8, daddr, 4)
+                __array(__u8, saddr_v6, 16)
+                __array(__u8, daddr_v6, 16)
+        ),
+
+        TP_fast_assign(
+                struct inet_sock *inet = inet_sk(sk);
+                struct in6_addr *pin6;
+                __be32 *p32;
+
+                __entry->skaddr = sk;
+                __entry->oldstate = oldstate;
+                __entry->newstate = newstate;
+
+                __entry->protocol = sk->sk_protocol;
+                __entry->sport = ntohs(inet->inet_sport);
+                __entry->dport = ntohs(inet->inet_dport);
+
+                p32 = (__be32 *)__entry->saddr;
+                *p32 = inet->inet_saddr;
+
+                p32 = (__be32 *)__entry->daddr;
+                *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+                if (sk->sk_family == AF_INET6) {
+                        pin6 = (struct in6_addr *)__entry->saddr_v6;
+                        *pin6 = sk->sk_v6_rcv_saddr;
+                        pin6 = (struct in6_addr *)__entry->daddr_v6;
+                        *pin6 = sk->sk_v6_daddr;
+                } else
+#endif
+                {
+                        pin6 = (struct in6_addr *)__entry->saddr_v6;
+                        ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+                        pin6 = (struct in6_addr *)__entry->daddr_v6;
+                        ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+                }
+        ),
+
+        TP_printk("protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "
+                  "saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+                  show_inet_protocol_name(__entry->protocol),
+                  __entry->sport, __entry->dport,
+                  __entry->saddr, __entry->daddr,
+                  __entry->saddr_v6, __entry->daddr_v6,
+                  show_tcp_state_name(__entry->oldstate),
+                  show_tcp_state_name(__entry->newstate))
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
......
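
A note on the EM()/EMe() machinery above: the same list is expanded twice,
first into TRACE_DEFINE_ENUM() so user-space tools can resolve the enum
values, then into { value, "name" } pairs for __print_symbolic(). A minimal
user-space analogue of this two-pass X-macro technique (illustrative only,
not kernel code; the enum values match include/net/tcp_states.h):

        #include <stdio.h>

        enum { TCP_ESTABLISHED = 1, TCP_LISTEN = 10 };

        #define tcp_state_names         \
                EM(TCP_ESTABLISHED)     \
                EMe(TCP_LISTEN)

        /* Second pass: expand the list into a value -> name table. */
        #define EM(a)   { a, #a },
        #define EMe(a)  { a, #a }

        static const struct { int val; const char *name; } names[] = {
                tcp_state_names
        };

        int main(void)
        {
                printf("%d -> %s\n", names[1].val, names[1].name); /* 10 -> TCP_LISTEN */
                return 0;
        }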
@@ -9,22 +9,6 @@
 #include <linux/tracepoint.h>
 #include <net/ipv6.h>
 
-#define tcp_state_name(state)   { state, #state }
-#define show_tcp_state_name(val)                        \
-        __print_symbolic(val,                           \
-                tcp_state_name(TCP_ESTABLISHED),        \
-                tcp_state_name(TCP_SYN_SENT),           \
-                tcp_state_name(TCP_SYN_RECV),           \
-                tcp_state_name(TCP_FIN_WAIT1),          \
-                tcp_state_name(TCP_FIN_WAIT2),          \
-                tcp_state_name(TCP_TIME_WAIT),          \
-                tcp_state_name(TCP_CLOSE),              \
-                tcp_state_name(TCP_CLOSE_WAIT),         \
-                tcp_state_name(TCP_LAST_ACK),           \
-                tcp_state_name(TCP_LISTEN),             \
-                tcp_state_name(TCP_CLOSING),            \
-                tcp_state_name(TCP_NEW_SYN_RECV))
-
 /*
  * tcp event with arguments sk and skb
  *
......
@@ -110,7 +110,7 @@ void dccp_set_state(struct sock *sk, const int state)
         /* Change state AFTER socket is unhashed to avoid closed
          * socket sitting in hash tables.
          */
-        sk->sk_state = state;
+        inet_sk_set_state(sk, state);
 }
 
 EXPORT_SYMBOL_GPL(dccp_set_state);
......
@@ -121,6 +121,7 @@
 #endif
 
 #include <net/l3mdev.h>
+#include <trace/events/sock.h>
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -1220,6 +1221,19 @@ int inet_sk_rebuild_header(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+void inet_sk_set_state(struct sock *sk, int state)
+{
+        trace_inet_sock_set_state(sk, sk->sk_state, state);
+        sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+        trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+        smp_store_release(&sk->sk_state, newstate);
+}
+
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features)
 {
......
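
The two helpers added here differ only in write ordering: inet_sk_set_state()
is for callers that hold the socket lock, where a plain store suffices, while
inet_sk_state_store() publishes the new state with release semantics for
lockless readers; both fire the tracepoint. A hypothetical caller, for
illustration:

        /* Socket lock held (e.g. a close path): plain store, tracepoint fires. */
        inet_sk_set_state(sk, TCP_CLOSE);

        /* State that lockless readers such as tcp_poll() or inet_diag must
         * observe consistently (e.g. entering listen): release store.
         */
        inet_sk_state_store(sk, TCP_LISTEN);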
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
         int max_retries, thresh;
         u8 defer_accept;
 
-        if (sk_state_load(sk_listener) != TCP_LISTEN)
+        if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
                 goto drop;
 
         max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
         if (newsk) {
                 struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-                newsk->sk_state = TCP_SYN_RECV;
+                inet_sk_set_state(newsk, TCP_SYN_RECV);
                 newicsk->icsk_bind_hash = NULL;
 
                 inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
          * It is OK, because this socket enters to hash table only
          * after validation is complete.
          */
-        sk_state_store(sk, TCP_LISTEN);
+        inet_sk_state_store(sk, TCP_LISTEN);
         if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                 inet->inet_sport = htons(inet->inet_num);
 
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
                 return 0;
         }
 
-        sk->sk_state = TCP_CLOSE;
+        inet_sk_set_state(sk, TCP_CLOSE);
         return err;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
......
@@ -544,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
                 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
         } else {
                 percpu_counter_inc(sk->sk_prot->orphan_count);
-                sk->sk_state = TCP_CLOSE;
+                inet_sk_set_state(sk, TCP_CLOSE);
                 sock_set_flag(sk, SOCK_DEAD);
                 inet_csk_destroy_sock(sk);
         }
......
@@ -283,8 +283,6 @@
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
 
-#include <trace/events/tcp.h>
-
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -504,7 +502,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
         sock_poll_wait(file, sk_sleep(sk), wait);
 
-        state = sk_state_load(sk);
+        state = inet_sk_state_load(sk);
         if (state == TCP_LISTEN)
                 return inet_csk_listen_poll(sk);
@@ -2040,8 +2038,6 @@ void tcp_set_state(struct sock *sk, int state)
 {
         int oldstate = sk->sk_state;
 
-        trace_tcp_set_state(sk, oldstate, state);
-
         switch (state) {
         case TCP_ESTABLISHED:
                 if (oldstate != TCP_ESTABLISHED)
@@ -2065,7 +2061,7 @@ void tcp_set_state(struct sock *sk, int state)
         /* Change state AFTER socket is unhashed to avoid closed
          * socket sitting in hash tables.
          */
-        sk_state_store(sk, state);
+        inet_sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2920,7 +2916,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
         if (sk->sk_type != SOCK_STREAM)
                 return;
 
-        info->tcpi_state = sk_state_load(sk);
+        info->tcpi_state = inet_sk_state_load(sk);
 
         /* Report meaningful fields for all TCP states, including listeners */
         rate = READ_ONCE(sk->sk_pacing_rate);
......
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
         struct tcp_info *info = _info;
 
-        if (sk_state_load(sk) == TCP_LISTEN) {
+        if (inet_sk_state_load(sk) == TCP_LISTEN) {
                 r->idiag_rqueue = sk->sk_ack_backlog;
                 r->idiag_wqueue = sk->sk_max_ack_backlog;
         } else if (sk->sk_type == SOCK_STREAM) {
......
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
                 timer_expires = jiffies;
         }
 
-        state = sk_state_load(sk);
+        state = inet_sk_state_load(sk);
         if (state == TCP_LISTEN)
                 rx_queue = sk->sk_ack_backlog;
         else
......
@@ -1795,7 +1795,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                 timer_expires = jiffies;
         }
 
-        state = sk_state_load(sp);
+        state = inet_sk_state_load(sp);
         if (state == TCP_LISTEN)
                 rx_queue = sp->sk_ack_backlog;
         else
......
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
         ep->base.dead = true;
 
-        ep->base.sk->sk_state = SCTP_SS_CLOSED;
+        inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
 
         /* Unlink this endpoint, so we can't find it again! */
         sctp_unhash_endpoint(ep);
......
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
          * successfully completed a connect() call.
          */
         if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
-                sk->sk_state = SCTP_SS_ESTABLISHED;
+                inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
 
         /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
         if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
             sctp_sstate(sk, ESTABLISHED)) {
-                sk->sk_state = SCTP_SS_CLOSING;
+                inet_sk_set_state(sk, SCTP_SS_CLOSING);
                 sk->sk_shutdown |= RCV_SHUTDOWN;
         }
 }
......
@@ -1544,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
         lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
         sk->sk_shutdown = SHUTDOWN_MASK;
-        sk->sk_state = SCTP_SS_CLOSING;
+        inet_sk_set_state(sk, SCTP_SS_CLOSING);
 
         ep = sctp_sk(sk)->ep;
@@ -4657,7 +4657,7 @@ static void sctp_shutdown(struct sock *sk, int how)
         if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
                 struct sctp_association *asoc;
 
-                sk->sk_state = SCTP_SS_CLOSING;
+                inet_sk_set_state(sk, SCTP_SS_CLOSING);
                 asoc = list_entry(ep->asocs.next,
                                   struct sctp_association, asocs);
                 sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -7513,13 +7513,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
          * sockets.
          *
          */
-        sk->sk_state = SCTP_SS_LISTENING;
+        inet_sk_set_state(sk, SCTP_SS_LISTENING);
         if (!ep->base.bind_addr.port) {
                 if (sctp_autobind(sk))
                         return -EAGAIN;
         } else {
                 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
-                        sk->sk_state = SCTP_SS_CLOSED;
+                        inet_sk_set_state(sk, SCTP_SS_CLOSED);
                         return -EADDRINUSE;
                 }
         }
@@ -8542,10 +8542,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
          * is called, set RCV_SHUTDOWN flag.
          */
         if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
-                newsk->sk_state = SCTP_SS_CLOSED;
+                inet_sk_set_state(newsk, SCTP_SS_CLOSED);
                 newsk->sk_shutdown |= RCV_SHUTDOWN;
         } else {
-                newsk->sk_state = SCTP_SS_ESTABLISHED;
+                inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
         }
 
         release_sock(newsk);
......
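
A note on the SCTP hunks above: they hand SCTP_SS_* values to a tracepoint
that prints TCP state names. This works because, as of this series, the SCTP
socket states alias the TCP state values (per include/net/sctp/constants.h,
sketched here for reference):

        enum sctp_sock_state {
                SCTP_SS_CLOSED       = TCP_CLOSE,
                SCTP_SS_LISTENING    = TCP_LISTEN,
                SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
                SCTP_SS_ESTABLISHED  = TCP_ESTABLISHED,
                SCTP_SS_CLOSING      = TCP_CLOSING,
        };

so show_tcp_state_name() renders, for example, SCTP_SS_LISTENING as TCP_LISTEN.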