Commit 74e7e1ef authored by Juergen Gross

xen/netback: don't call kfree_skb() with interrupts disabled

It is not allowed to call kfree_skb() from hardware interrupt
context or with interrupts disabled. So remove kfree_skb()
from the spin_lock_irqsave() section and use the existing
"drop" label in xenvif_start_xmit() for dropping the SKB. At the
same time replace the dev_kfree_skb() call there with a call to
dev_kfree_skb_any(), as xenvif_start_xmit() can be called with
interrupts disabled.

This is XSA-424 / CVE-2022-42328 / CVE-2022-42329.
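
In pattern form, the change looks like the following minimal
kernel-style sketch. This is illustrative only: the helper names
(queue_tail_broken, queue_tail_fixed, start_xmit_sketch) and the
"full" flag are invented for the example; the actual patch is the
diff below.

/*
 * Sketch: never free an SKB while holding an irqsave lock; instead
 * report failure and let the caller free it after the unlock.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Broken variant: frees the SKB while the irqsave lock is held. */
static void queue_tail_broken(struct sk_buff_head *rxq, struct sk_buff *skb,
			      bool full)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	if (full)
		kfree_skb(skb);		/* BUG: interrupts are disabled here */
	else
		__skb_queue_tail(rxq, skb);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/* Fixed variant: only record the failure under the lock ... */
static bool queue_tail_fixed(struct sk_buff_head *rxq, struct sk_buff *skb,
			     bool full)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&rxq->lock, flags);
	if (full)
		ret = false;		/* caller drops the SKB */
	else
		__skb_queue_tail(rxq, skb);
	spin_unlock_irqrestore(&rxq->lock, flags);

	return ret;
}

/* ... and free in the caller, outside the locked section, with
 * dev_kfree_skb_any(), which is safe whether or not interrupts
 * are disabled. */
static netdev_tx_t start_xmit_sketch(struct net_device *dev,
				     struct sk_buff_head *rxq,
				     struct sk_buff *skb, bool full)
{
	if (!queue_tail_fixed(rxq, skb, full)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}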

Fixes: be81992f ("xen/netback: don't queue unlimited number of packages")
Reported-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent ad7f402a
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -386,7 +386,7 @@ int xenvif_dealloc_kthread(void *data);
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		skb_clear_hash(skb);
 
-	xenvif_rx_queue_tail(queue, skb);
+	if (!xenvif_rx_queue_tail(queue, skb))
+		goto drop;
+
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
  drop:
 	vif->dev->stats.tx_dropped++;
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 	return false;
 }
 
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 {
 	unsigned long flags;
+	bool ret = true;
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 		struct net_device *dev = queue->vif->dev;
 
 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-		kfree_skb(skb);
-		queue->vif->dev->stats.rx_dropped++;
+		ret = false;
 	} else {
 		if (skb_queue_empty(&queue->rx_queue))
 			xenvif_update_needed_slots(queue, skb);
 
@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 	}
 
 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
+	return ret;
 }
 
 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
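
dev_kfree_skb_any() is the right call in the drop path because it
checks the execution context and defers the free when it must. A
simplified paraphrase of __dev_kfree_skb_any() from net/core/dev.c
(the real function also carries a free-reason argument):

#include <linux/netdevice.h>

/* Simplified paraphrase of __dev_kfree_skb_any() (net/core/dev.c). */
static void dev_kfree_skb_any_sketch(struct sk_buff *skb)
{
	if (in_hardirq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* defer the free to softirq */
	else
		dev_kfree_skb(skb);	/* safe to free right here */
}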