Commit d1a37ded authored by Yunsheng Lin, committed by David S. Miller

net: hns3: fix a use after free problem in hns3_nic_maybe_stop_tx()

Currently, hns3_nic_maybe_stop_tx() uses skb_copy() to linearize an
SKB if the BD num required by the SKB does not meet the hardware
limitation; it linearizes the SKB by allocating a new linearized SKB
and freeing the old one. If hns3_nic_maybe_stop_tx() returns -EBUSY
because there is not enough space in the ring to send the linearized
SKB to the hardware, sch_direct_xmit() still holds a reference to the
old SKB and tries to retransmit it when dev_hard_start_xmit() returns
TX_BUSY, which may cause a use-after-free problem.

This patch fixes it by using __skb_linearize() to linearize the
SKB in hns3_nic_maybe_stop_tx().

Fixes: 51e8439f ("net: hns3: add 8 BD limit for tx flow")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2a597eff
@@ -1288,31 +1288,24 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev, struct net_device *netdev,
struct sk_buff **out_skb) struct sk_buff *skb)
{ {
struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_nic_priv *priv = netdev_priv(netdev);
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
struct sk_buff *skb = *out_skb;
unsigned int bd_num; unsigned int bd_num;
bd_num = hns3_tx_bd_num(skb, bd_size); bd_num = hns3_tx_bd_num(skb, bd_size);
if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
struct sk_buff *new_skb;
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
!hns3_skb_need_linearized(skb, bd_size, bd_num)) !hns3_skb_need_linearized(skb, bd_size, bd_num))
goto out; goto out;
/* manual split the send packet */ if (__skb_linearize(skb))
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM; return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
bd_num = hns3_tx_bd_count(new_skb->len); bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(new_skb) && (!skb_is_gso(skb) &&
bd_num > HNS3_MAX_NON_TSO_BD_NUM)) bd_num > HNS3_MAX_NON_TSO_BD_NUM))
return -ENOMEM; return -ENOMEM;
@@ -1415,7 +1408,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Prefetch the data used later */ /* Prefetch the data used later */
prefetch(skb->data); prefetch(skb->data);
ret = hns3_nic_maybe_stop_tx(ring, netdev, &skb); ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
if (unlikely(ret <= 0)) { if (unlikely(ret <= 0)) {
if (ret == -EBUSY) { if (ret == -EBUSY) {
u64_stats_update_begin(&ring->syncp); u64_stats_update_begin(&ring->syncp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment