Commit a06316dc authored by Björn Töpel, committed by Tony Nguyen

ixgbe: avoid premature Rx buffer reuse

The page recycle code incorrectly relied on a page fragment not being
freed inside xdp_do_redirect(). Because of this assumption, page
fragments still in use by the stack or by an XDP redirect could be
reused and overwritten.

To avoid this, store the page count prior to invoking
xdp_do_redirect().
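
For illustration only (not part of the commit message): a minimal C
sketch of the ordering the fix enforces. The demo_* names are
hypothetical and the driver plumbing is elided; only page_count(),
bpf_prog_run_xdp() and xdp_do_redirect() are real kernel APIs.

#include <linux/mm.h>      /* page_count() */
#include <linux/filter.h>  /* bpf_prog_run_xdp(), xdp_do_redirect() */

/* Hypothetical stand-in for the driver's per-buffer bookkeeping. */
struct demo_rx_buffer {
	struct page *page;
	unsigned int pagecnt_bias;
};

/*
 * Reuse is safe only if the driver is the sole owner of the page.
 * The comparison must use the refcount snapshotted *before* the XDP
 * program ran: xdp_do_redirect() may free the fragment, so a value
 * read afterwards can race with a new user of the page and falsely
 * report sole ownership.
 */
static bool demo_can_reuse_rx_page(struct demo_rx_buffer *buf,
				   int rx_buffer_pgcnt)
{
	return (rx_buffer_pgcnt - buf->pagecnt_bias) <= 1;
}

static void demo_run_xdp(struct demo_rx_buffer *buf, struct xdp_buff *xdp,
			 struct bpf_prog *prog, struct net_device *dev)
{
	/* 1. Snapshot the page count while the fragment is still ours. */
	int rx_buffer_pgcnt = page_count(buf->page);

	/* 2. The program may redirect; the fragment can then be freed
	 *    and the page grabbed by another CPU at any time.
	 */
	if (bpf_prog_run_xdp(prog, xdp) == XDP_REDIRECT)
		xdp_do_redirect(dev, xdp, prog);

	/* 3. Decide recycling from the step-1 snapshot, never from a
	 *    fresh page_count()/page_ref_count() read.
	 */
	if (demo_can_reuse_rx_page(buf, rx_buffer_pgcnt)) {
		/* hand the other half of the page back to the Rx ring */
	}
}

In the patch itself the snapshot is taken in ixgbe_get_rx_buffer()
(and only when PAGE_SIZE < 8192, since that is the only configuration
whose reuse check consults the refcount) and threaded through
ixgbe_put_rx_buffer() into ixgbe_can_reuse_rx_page(), as the diff
below shows.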

Fixes: 64530739 ("ixgbe: add initial support for xdp redirect")
Reported-and-analyzed-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 75aab4e1
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+				    int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 	/* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
 						   union ixgbe_adv_rx_desc *rx_desc,
 						   struct sk_buff **skb,
-						   const unsigned int size)
+						   const unsigned int size,
+						   int *rx_buffer_pgcnt)
 {
 	struct ixgbe_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 	*skb = rx_buffer->skb;
 
@@ -2055,9 +2063,10 @@ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *rx_buffer,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				int rx_buffer_pgcnt)
 {
-	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
+		int rx_buffer_pgcnt;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		dma_rmb();
 
-		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 		}
 
-		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
 		cleaned_count++;
 
 		/* place incomplete frames back on ring for completion */