Commit 9a36a6bc authored by David Howells

rxrpc: trace: Don't use __builtin_return_address for sk_buff tracing

In rxrpc tracing, use enums to generate lists of points of interest rather
than __builtin_return_address() for the sk_buff tracepoint.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
parent fa3492ab
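For readers unfamiliar with the EM()/E_() lists that the hunks below modify: the rxrpc trace header expands the same list once into the enum of tracing points and once into the value-to-string table consumed by __print_symbolic(), so each point of interest is declared on a single line. The following is a minimal, hypothetical sketch of that macro pattern; the names example_traces, example_trace_get and example_trace_put are illustrative and not taken from the kernel source.

/* Minimal sketch of the EM()/E_() expansion pattern (illustrative names). */
#define example_traces \
	EM(example_trace_get,	"GET") \
	E_(example_trace_put,	"PUT")

/* First expansion: build the enum of tracing points. */
#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a
enum example_trace { example_traces };

/* Second expansion: build the { value, "label" } table for __print_symbolic(). */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
/* Used inside TP_printk() as: __print_symbolic(__entry->why, example_traces) */

With this scheme, a new tracing point such as rxrpc_skb_get_ack needs only one new EM() line, and callers no longer have to pass __builtin_return_address() to identify themselves.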
@@ -17,19 +17,31 @@
  * Declare tracing information enums and their string mappings for display.
  */
 #define rxrpc_skb_traces \
-	EM(rxrpc_skb_ack, "ACK") \
-	EM(rxrpc_skb_cleaned, "CLN") \
-	EM(rxrpc_skb_cloned_jumbo, "CLJ") \
-	EM(rxrpc_skb_freed, "FRE") \
-	EM(rxrpc_skb_got, "GOT") \
-	EM(rxrpc_skb_lost, "*L*") \
-	EM(rxrpc_skb_new, "NEW") \
-	EM(rxrpc_skb_purged, "PUR") \
-	EM(rxrpc_skb_received, "RCV") \
-	EM(rxrpc_skb_rotated, "ROT") \
-	EM(rxrpc_skb_seen, "SEE") \
-	EM(rxrpc_skb_unshared, "UNS") \
-	E_(rxrpc_skb_unshared_nomem, "US0")
+	EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
+	EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+	EM(rxrpc_skb_get_ack, "GET ack ") \
+	EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+	EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
+	EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
+	EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
+	EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
+	EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
+	EM(rxrpc_skb_new_unshared, "NEW unshared ") \
+	EM(rxrpc_skb_put_ack, "PUT ack ") \
+	EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
+	EM(rxrpc_skb_put_error_report, "PUT error-rep") \
+	EM(rxrpc_skb_put_input, "PUT input ") \
+	EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+	EM(rxrpc_skb_put_lose, "PUT lose ") \
+	EM(rxrpc_skb_put_purge, "PUT purge ") \
+	EM(rxrpc_skb_put_rotate, "PUT rotate ") \
+	EM(rxrpc_skb_put_unknown, "PUT unknown ") \
+	EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+	EM(rxrpc_skb_see_local_work, "SEE locl-work") \
+	EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+	EM(rxrpc_skb_see_reject, "SEE reject ") \
+	EM(rxrpc_skb_see_rotate, "SEE rotate ") \
+	E_(rxrpc_skb_see_version, "SEE version ")
 
 #define rxrpc_local_traces \
 	EM(rxrpc_local_free, "FREE ") \
@@ -582,33 +594,30 @@ TRACE_EVENT(rxrpc_call,
 	    );
 
 TRACE_EVENT(rxrpc_skb,
-	    TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
-		     int usage, int mod_count, const void *where),
+	    TP_PROTO(struct sk_buff *skb, int usage, int mod_count,
+		     enum rxrpc_skb_trace why),
 
-	    TP_ARGS(skb, op, usage, mod_count, where),
+	    TP_ARGS(skb, usage, mod_count, why),
 
 	    TP_STRUCT__entry(
 		    __field(struct sk_buff *, skb)
-		    __field(enum rxrpc_skb_trace, op)
 		    __field(int, usage)
 		    __field(int, mod_count)
-		    __field(const void *, where)
+		    __field(enum rxrpc_skb_trace, why)
 		    ),
 
 	    TP_fast_assign(
 		    __entry->skb = skb;
-		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->mod_count = mod_count;
-		    __entry->where = where;
+		    __entry->why = why;
		    ),
 
-	    TP_printk("s=%p Rx %s u=%d m=%d p=%pSR",
+	    TP_printk("s=%p Rx %s u=%d m=%d",
 		      __entry->skb,
-		      __print_symbolic(__entry->op, rxrpc_skb_traces),
+		      __print_symbolic(__entry->why, rxrpc_skb_traces),
 		      __entry->usage,
-		      __entry->mod_count,
-		      __entry->where)
+		      __entry->mod_count)
 	    );
 
 TRACE_EVENT(rxrpc_rx_packet,
...
@@ -153,7 +153,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	spin_lock_bh(&call->acks_ack_lock);
 	ack_skb = call->acks_soft_tbl;
 	if (ack_skb) {
-		rxrpc_get_skb(ack_skb, rxrpc_skb_ack);
+		rxrpc_get_skb(ack_skb, rxrpc_skb_get_ack);
 		ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
 	}
 	spin_unlock_bh(&call->acks_ack_lock);
@@ -251,7 +251,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 no_further_resend:
 	spin_unlock(&call->tx_lock);
 no_resend:
-	rxrpc_free_skb(ack_skb, rxrpc_skb_freed);
+	rxrpc_free_skb(ack_skb, rxrpc_skb_put_ack);
 	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
 	resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
...
@@ -663,7 +663,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
 	}
 	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
-	rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_cleaned);
+	rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_put_ack);
 	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
 }
...
@@ -437,7 +437,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 	/* go through the conn-level event packets, releasing the ref on this
 	 * connection that each one has when we've finished with it */
 	while ((skb = skb_dequeue(&conn->rx_queue))) {
-		rxrpc_see_skb(skb, rxrpc_skb_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
 		ret = rxrpc_process_event(conn, skb, &abort_code);
 		switch (ret) {
 		case -EPROTO:
@@ -449,7 +449,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 			goto requeue_and_leave;
 		case -ECONNABORTED:
 		default:
-			rxrpc_free_skb(skb, rxrpc_skb_freed);
+			rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
 			break;
 		}
 	}
@@ -463,7 +463,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 protocol_error:
 	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
 		goto requeue_and_leave;
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
 	return;
 }
...
@@ -485,7 +485,7 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb)
 			       rxrpc_propose_ack_input_data);
 err_free:
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_input);
 }
 /*
@@ -513,7 +513,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
 			kdebug("couldn't clone");
 			return false;
 		}
-		rxrpc_new_skb(jskb, rxrpc_skb_cloned_jumbo);
+		rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket);
 		jsp = rxrpc_skb(jskb);
 		jsp->offset = offset;
 		jsp->len = RXRPC_JUMBO_DATALEN;
@@ -553,7 +553,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 	state = READ_ONCE(call->state);
 	if (state >= RXRPC_CALL_COMPLETE) {
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_input);
 		return;
 	}
@@ -563,14 +563,14 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 	if (sp->hdr.securityIndex != 0) {
 		struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
 		if (!nskb) {
-			rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+			rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare_nomem);
 			return;
 		}
 		if (nskb != skb) {
-			rxrpc_eaten_skb(skb, rxrpc_skb_received);
+			rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare);
 			skb = nskb;
-			rxrpc_new_skb(skb, rxrpc_skb_unshared);
+			rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
 			sp = rxrpc_skb(skb);
 		}
 	}
@@ -609,7 +609,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 	rxrpc_notify_socket(call);
 	spin_unlock(&call->input_lock);
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_input);
 	_leave(" [queued]");
 }
@@ -994,8 +994,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 out:
 	spin_unlock(&call->input_lock);
 out_not_locked:
-	rxrpc_free_skb(skb_put, rxrpc_skb_freed);
-	rxrpc_free_skb(skb_old, rxrpc_skb_freed);
+	rxrpc_free_skb(skb_put, rxrpc_skb_put_input);
+	rxrpc_free_skb(skb_old, rxrpc_skb_put_ack);
 }
 /*
@@ -1075,7 +1075,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 		break;
 	}
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_input);
 no_free:
 	_leave("");
 }
@@ -1137,7 +1137,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 		skb_queue_tail(&local->event_queue, skb);
 		rxrpc_queue_local(local);
 	} else {
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_input);
 	}
 }
@@ -1150,7 +1150,7 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 		skb_queue_tail(&local->reject_queue, skb);
 		rxrpc_queue_local(local);
 	} else {
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_input);
 	}
 }
@@ -1228,7 +1228,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
-	rxrpc_new_skb(skb, rxrpc_skb_received);
+	rxrpc_new_skb(skb, rxrpc_skb_new_encap_rcv);
 	skb_pull(skb, sizeof(struct udphdr));
@@ -1245,7 +1245,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		static int lose;
 		if ((lose++ & 7) == 7) {
 			trace_rxrpc_rx_lose(sp);
-			rxrpc_free_skb(skb, rxrpc_skb_lost);
+			rxrpc_free_skb(skb, rxrpc_skb_put_lose);
 			return 0;
 		}
 	}
@@ -1286,14 +1286,14 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	if (sp->hdr.securityIndex != 0) {
 		struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
 		if (!nskb) {
-			rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+			rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare_nomem);
 			goto out;
 		}
 		if (nskb != skb) {
-			rxrpc_eaten_skb(skb, rxrpc_skb_received);
+			rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare);
 			skb = nskb;
-			rxrpc_new_skb(skb, rxrpc_skb_unshared);
+			rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
 			sp = rxrpc_skb(skb);
 		}
 	}
@@ -1434,7 +1434,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	goto out;
 discard:
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_input);
 out:
 	trace_rxrpc_rx_done(0, 0);
 	return 0;
...
@@ -88,7 +88,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
 	if (skb) {
 		struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-		rxrpc_see_skb(skb, rxrpc_skb_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_see_local_work);
 		_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
 		switch (sp->hdr.type) {
@@ -105,7 +105,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
 			break;
 		}
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_input);
 	}
 	_leave("");
...
@@ -615,7 +615,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 	memset(&whdr, 0, sizeof(whdr));
 	while ((skb = skb_dequeue(&local->reject_queue))) {
-		rxrpc_see_skb(skb, rxrpc_skb_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_see_reject);
 		sp = rxrpc_skb(skb);
 		switch (skb->mark) {
@@ -631,7 +631,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 			ioc = 2;
 			break;
 		default:
-			rxrpc_free_skb(skb, rxrpc_skb_freed);
+			rxrpc_free_skb(skb, rxrpc_skb_put_input);
 			continue;
 		}
@@ -656,7 +656,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 					     rxrpc_tx_point_reject);
 		}
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_input);
 	}
 	_leave("");
...
@@ -158,12 +158,12 @@ void rxrpc_error_report(struct sock *sk)
 		_leave("UDP socket errqueue empty");
 		return;
 	}
-	rxrpc_new_skb(skb, rxrpc_skb_received);
+	rxrpc_new_skb(skb, rxrpc_skb_new_error_report);
 	serr = SKB_EXT_ERR(skb);
 	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
 		_leave("UDP empty message");
 		rcu_read_unlock();
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
 		return;
 	}
@@ -172,7 +172,7 @@ void rxrpc_error_report(struct sock *sk)
 		peer = NULL;
 	if (!peer) {
 		rcu_read_unlock();
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
 		_leave(" [no peer]");
 		return;
 	}
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk)
 	rxrpc_store_error(peer, serr);
 out:
 	rcu_read_unlock();
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
 	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
 	_leave("");
...
@@ -229,7 +229,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 	_enter("%d", call->debug_id);
 	skb = skb_dequeue(&call->recvmsg_queue);
-	rxrpc_see_skb(skb, rxrpc_skb_rotated);
+	rxrpc_see_skb(skb, rxrpc_skb_see_rotate);
 	sp = rxrpc_skb(skb);
 	tseq = sp->hdr.seq;
@@ -240,7 +240,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 	if (after(tseq, call->rx_consumed))
 		smp_store_release(&call->rx_consumed, tseq);
-	rxrpc_free_skb(skb, rxrpc_skb_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_put_rotate);
 	trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
 			    serial, call->rx_consumed);
@@ -302,7 +302,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 	 */
 	skb = skb_peek(&call->recvmsg_queue);
 	while (skb) {
-		rxrpc_see_skb(skb, rxrpc_skb_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg);
 		sp = rxrpc_skb(skb);
 		seq = sp->hdr.seq;
...
 // SPDX-License-Identifier: GPL-2.0-or-later
-/* ar-skbuff.c: socket buffer destruction handling
+/* Socket buffer accounting
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -19,56 +19,50 @@
 /*
  * Note the allocation or reception of a socket buffer.
  */
-void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(skb));
-	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
 }
 /*
  * Note the re-emergence of a socket buffer from a queue or buffer.
  */
-void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n = atomic_read(select_skb_count(skb));
-		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
 	}
 }
 /*
  * Note the addition of a ref on a socket buffer.
  */
-void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(skb));
-	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
 	skb_get(skb);
 }
 /*
  * Note the dropping of a ref on a socket buffer by the core.
  */
-void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(&rxrpc_n_rx_skbs);
-	trace_rxrpc_skb(skb, op, 0, n, here);
+	trace_rxrpc_skb(skb, 0, n, why);
 }
 /*
  * Note the destruction of a socket buffer.
  */
-void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 {
-	const void *here = __builtin_return_address(0);
 	if (skb) {
-		int n;
-		n = atomic_dec_return(select_skb_count(skb));
-		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+		int n = atomic_dec_return(select_skb_count(skb));
+		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
 		kfree_skb(skb);
 	}
 }
@@ -78,12 +72,12 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
  */
 void rxrpc_purge_queue(struct sk_buff_head *list)
 {
-	const void *here = __builtin_return_address(0);
 	struct sk_buff *skb;
 	while ((skb = skb_dequeue((list))) != NULL) {
 		int n = atomic_dec_return(select_skb_count(skb));
-		trace_rxrpc_skb(skb, rxrpc_skb_purged,
-				refcount_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
+				rxrpc_skb_put_purge);
 		kfree_skb(skb);
 	}
 }