Commit c5306726 authored by David S. Miller

Merge branch 'sock_queue_err_skb'

Alexander Duyck says:

====================
Address reference counting issues with sock_queue_err_skb

After looking over the code for skb_clone_sk following comments made by
Eric Dumazet, I have come to the conclusion that skb_clone_sk takes the
correct approach to handling sk_refcnt when creating a buffer that is
eventually meant to be returned to the socket via the sock_queue_err_skb
function.

However, upon reviewing the other callers I found what I believe to be a
possible reference counting issue in the path for handling "wifi ack"
packets. To address this I have applied the same logic that is currently
in place, so that sk_refcnt is forced to stay at least 1, or we do not
provide an skb to return on the sk_error_queue.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 72b126a4 bf7fa551
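
As a rough illustration of the pattern the cover letter describes (this sketch is not part of the series; example_complete_skb is a hypothetical name and the error-queue metadata setup is elided), the completion side looks roughly like this:

#include <linux/skbuff.h>
#include <net/sock.h>

/*
 * Sketch only: the skb is assumed to come from skb_clone_sk(), so
 * skb->sk is valid and the clone holds a reference on sk_refcnt.
 * The extra sock_hold()/sock_put() pair keeps the socket alive across
 * sock_queue_err_skb(), whose internal skb_orphan() could otherwise
 * drop the last reference before the skb reaches the sk_error_queue.
 */
static void example_complete_skb(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;	/* reference held by skb_clone_sk() */
	int err;

	/* error-queue metadata (SKB_EXT_ERR(skb)) would be filled in here */

	sock_hold(sk);

	err = sock_queue_err_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	sock_put(sk);
}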
@@ -3511,6 +3511,19 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 }
 EXPORT_SYMBOL(sock_dequeue_err_skb);
 
+/**
+ * skb_clone_sk - create clone of skb, and take reference to socket
+ * @skb: the skb to clone
+ *
+ * This function creates a clone of a buffer that holds a reference on
+ * sk_refcnt.  Buffers created via this function are meant to be
+ * returned using sock_queue_err_skb, or freed via kfree_skb.
+ *
+ * When passing buffers allocated with this function to sock_queue_err_skb
+ * it is necessary to wrap the call with sock_hold/sock_put in order to
+ * prevent the socket from being released prior to being enqueued on
+ * the sk_error_queue.
+ */
 struct sk_buff *skb_clone_sk(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
@@ -3615,9 +3628,14 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
 
+	/* take a reference to prevent skb_orphan() from freeing the socket */
+	sock_hold(sk);
+
 	err = sock_queue_err_skb(sk, skb);
 	if (err)
 		kfree_skb(skb);
+
+	sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
...
@@ -2072,30 +2072,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	if (unlikely(!multicast && skb->sk &&
 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
-		struct sk_buff *orig_skb = skb;
+		struct sk_buff *ack_skb = skb_clone_sk(skb);
 
-		skb = skb_clone(skb, GFP_ATOMIC);
-		if (skb) {
+		if (ack_skb) {
 			unsigned long flags;
 			int id;
 
 			spin_lock_irqsave(&local->ack_status_lock, flags);
-			id = idr_alloc(&local->ack_status_frames, orig_skb,
+			id = idr_alloc(&local->ack_status_frames, ack_skb,
 				       1, 0x10000, GFP_ATOMIC);
 			spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
 			if (id >= 0) {
 				info_id = id;
 				info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
-			} else if (skb_shared(skb)) {
-				kfree_skb(orig_skb);
 			} else {
-				kfree_skb(skb);
-				skb = orig_skb;
+				kfree_skb(ack_skb);
 			}
-		} else {
-			/* couldn't clone -- lose tx status ... */
-			skb = orig_skb;
 		}
 	}
...
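
Taken together, here is a rough sketch of the lifecycle described by the skb_clone_sk() kernel-doc and the mac80211 hunk above (example_tx_state, example_start_xmit and example_tx_status are hypothetical names; real users such as mac80211 track the clone in an idr rather than a single pointer): the transmit path clones the outgoing skb with skb_clone_sk() when the socket requested wifi ack status, stashes the clone, and the status path later passes the clone to skb_complete_wifi_ack(), which performs the sock_hold()/sock_put() sequence added in this series.

#include <linux/skbuff.h>

/* hypothetical per-device state, not a real kernel structure */
struct example_tx_state {
	struct sk_buff *ack_skb;
};

static void example_start_xmit(struct example_tx_state *st, struct sk_buff *skb)
{
	if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
		/* the clone keeps skb->sk and holds a reference on sk_refcnt */
		st->ack_skb = skb_clone_sk(skb);
	}

	/* ... hand the original skb to the hardware as usual ... */
}

static void example_tx_status(struct example_tx_state *st, bool acked)
{
	if (st->ack_skb) {
		/* queues the clone on the sk_error_queue, or frees it on failure */
		skb_complete_wifi_ack(st->ack_skb, acked);
		st->ack_skb = NULL;
	}
}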