Commit c2e245ab, authored by Jesse Brandeburg, committed by Jeff Kirsher

i40e/i40evf: try again after failure

This is the "Don't Give Up" patch.  Previously the
driver could fail an allocation, and then possibly stall
a queue forever, by never coming back to continue receiving
or allocating buffers.

With this patch, the driver will keep polling trying to allocate
receive buffers until it succeeds.  This should keep all receive
queues running even in the face of memory pressure.

Also update copyright year in file header.

Change-ID: I2b103d1ce95b9831288a7222c3343ffa1988b81b
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 40d72a50
...@@ -1195,8 +1195,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) ...@@ -1195,8 +1195,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
* i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: ring to place buffers on * @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace * @cleaned_count: number of buffers to replace
*
* Returns true if any errors on allocation
**/ **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{ {
u16 i = rx_ring->next_to_use; u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
...@@ -1204,7 +1206,7 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1204,7 +1206,7 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */ /* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count) if (!rx_ring->netdev || !cleaned_count)
return; return false;
while (cleaned_count--) { while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i); rx_desc = I40E_RX_DESC(rx_ring, i);
...@@ -1251,17 +1253,29 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1251,17 +1253,29 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0; i = 0;
} }
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
return false;
no_buffers: no_buffers:
if (rx_ring->next_to_use != i) if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i); i40e_release_rx_desc(rx_ring, i);
/* make sure to come back via polling to try again after
* allocation failure
*/
return true;
} }
/** /**
* i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on * @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace * @cleaned_count: number of buffers to replace
*
* Returns true if any errors on allocation
**/ **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{ {
u16 i = rx_ring->next_to_use; u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
...@@ -1270,7 +1284,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1270,7 +1284,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */ /* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count) if (!rx_ring->netdev || !cleaned_count)
return; return false;
while (cleaned_count--) { while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i); rx_desc = I40E_RX_DESC(rx_ring, i);
...@@ -1297,6 +1311,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1297,6 +1311,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
if (dma_mapping_error(rx_ring->dev, bi->dma)) { if (dma_mapping_error(rx_ring->dev, bi->dma)) {
rx_ring->rx_stats.alloc_buff_failed++; rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0; bi->dma = 0;
dev_kfree_skb(bi->skb);
bi->skb = NULL;
goto no_buffers; goto no_buffers;
} }
} }
...@@ -1308,9 +1324,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1308,9 +1324,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0; i = 0;
} }
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
return false;
no_buffers: no_buffers:
if (rx_ring->next_to_use != i) if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i); i40e_release_rx_desc(rx_ring, i);
/* make sure to come back via polling to try again after
* allocation failure
*/
return true;
} }
/** /**
...@@ -1494,7 +1520,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, ...@@ -1494,7 +1520,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* *
* Returns true if there's any budget left (e.g. the clean is finished) * Returns true if there's any budget left (e.g. the clean is finished)
**/ **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_packets = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
...@@ -1504,6 +1530,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1504,6 +1530,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean; u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool failure = false;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
...@@ -1516,7 +1543,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1516,7 +1543,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag; u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) { if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); failure = failure ||
i40e_alloc_rx_buffers_ps(rx_ring,
cleaned_count);
cleaned_count = 0; cleaned_count = 0;
} }
...@@ -1546,6 +1575,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1546,6 +1575,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->rx_hdr_len); rx_ring->rx_hdr_len);
if (!skb) { if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++; rx_ring->rx_stats.alloc_buff_failed++;
failure = true;
break; break;
} }
...@@ -1675,7 +1705,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1675,7 +1705,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
return total_rx_packets; return failure ? budget : total_rx_packets;
} }
/** /**
...@@ -1693,6 +1723,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1693,6 +1723,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status; u32 rx_error, rx_status;
u16 rx_packet_len; u16 rx_packet_len;
bool failure = false;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
u16 i; u16 i;
...@@ -1703,7 +1734,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1703,7 +1734,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag; u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) { if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); failure = failure ||
i40e_alloc_rx_buffers_1buf(rx_ring,
cleaned_count);
cleaned_count = 0; cleaned_count = 0;
} }
...@@ -1802,7 +1835,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1802,7 +1835,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
return total_rx_packets; return failure ? budget : total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr) static u32 i40e_buildreg_itr(const int type, const u16 itr)
......
/******************************************************************************* /*******************************************************************************
* *
* Intel Ethernet Controller XL710 Family Linux Driver * Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation. * Copyright(c) 2013 - 2016 Intel Corporation.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
...@@ -316,8 +316,8 @@ struct i40e_ring_container { ...@@ -316,8 +316,8 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \ #define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40e_alloc_rx_headers(struct i40e_ring *rxr); void i40e_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
......
...@@ -667,8 +667,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) ...@@ -667,8 +667,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
* i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: ring to place buffers on * @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace * @cleaned_count: number of buffers to replace
*
* Returns true if any errors on allocation
**/ **/
void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{ {
u16 i = rx_ring->next_to_use; u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
...@@ -676,7 +678,7 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -676,7 +678,7 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */ /* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count) if (!rx_ring->netdev || !cleaned_count)
return; return false;
while (cleaned_count--) { while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i); rx_desc = I40E_RX_DESC(rx_ring, i);
...@@ -723,17 +725,29 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -723,17 +725,29 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0; i = 0;
} }
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
return false;
no_buffers: no_buffers:
if (rx_ring->next_to_use != i) if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i); i40e_release_rx_desc(rx_ring, i);
/* make sure to come back via polling to try again after
* allocation failure
*/
return true;
} }
/** /**
* i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on * @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace * @cleaned_count: number of buffers to replace
*
* Returns true if any errors on allocation
**/ **/
void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{ {
u16 i = rx_ring->next_to_use; u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
...@@ -742,7 +756,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -742,7 +756,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */ /* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count) if (!rx_ring->netdev || !cleaned_count)
return; return false;
while (cleaned_count--) { while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i); rx_desc = I40E_RX_DESC(rx_ring, i);
...@@ -769,6 +783,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -769,6 +783,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
if (dma_mapping_error(rx_ring->dev, bi->dma)) { if (dma_mapping_error(rx_ring->dev, bi->dma)) {
rx_ring->rx_stats.alloc_buff_failed++; rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0; bi->dma = 0;
dev_kfree_skb(bi->skb);
bi->skb = NULL;
goto no_buffers; goto no_buffers;
} }
} }
...@@ -780,9 +796,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -780,9 +796,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0; i = 0;
} }
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
return false;
no_buffers: no_buffers:
if (rx_ring->next_to_use != i) if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i); i40e_release_rx_desc(rx_ring, i);
/* make sure to come back via polling to try again after
* allocation failure
*/
return true;
} }
/** /**
...@@ -965,7 +991,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, ...@@ -965,7 +991,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* *
* Returns true if there's any budget left (e.g. the clean is finished) * Returns true if there's any budget left (e.g. the clean is finished)
**/ **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{ {
unsigned int total_rx_bytes = 0, total_rx_packets = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
...@@ -975,6 +1001,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -975,6 +1001,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean; u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool failure = false;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
...@@ -984,7 +1011,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -984,7 +1011,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag; u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) { if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); failure = failure ||
i40evf_alloc_rx_buffers_ps(rx_ring,
cleaned_count);
cleaned_count = 0; cleaned_count = 0;
} }
...@@ -1009,6 +1038,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1009,6 +1038,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->rx_hdr_len); rx_ring->rx_hdr_len);
if (!skb) { if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++; rx_ring->rx_stats.alloc_buff_failed++;
failure = true;
break; break;
} }
...@@ -1131,7 +1161,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) ...@@ -1131,7 +1161,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
return total_rx_packets; return failure ? budget : total_rx_packets;
} }
/** /**
...@@ -1149,6 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1149,6 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status; u32 rx_error, rx_status;
u16 rx_packet_len; u16 rx_packet_len;
bool failure = false;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
u16 i; u16 i;
...@@ -1159,7 +1190,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1159,7 +1190,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag; u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) { if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); failure = failure ||
i40evf_alloc_rx_buffers_1buf(rx_ring,
cleaned_count);
cleaned_count = 0; cleaned_count = 0;
} }
...@@ -1240,7 +1273,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) ...@@ -1240,7 +1273,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
return total_rx_packets; return failure ? budget : total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr) static u32 i40e_buildreg_itr(const int type, const u16 itr)
......
...@@ -313,8 +313,8 @@ struct i40e_ring_container { ...@@ -313,8 +313,8 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \ #define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40evf_alloc_rx_headers(struct i40e_ring *rxr); void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment