Commit 714a485a authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2019-06-17

This series contains updates to the iavf driver only.

Akeem updates the driver to change how VLAN tags are populated and
programmed into the hardware: start from the first member of the list
and continue until the number of allowed VLAN tags is exhausted.
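The pattern is essentially a bounded walk over the VLAN filter list; a minimal sketch of the idea (the list and field names below are illustrative, not the driver's actual ones):

    #include <linux/list.h>
    #include <linux/types.h>

    /* Illustrative only: program VLAN filters starting from the first entry
     * in the list and stop once the number of allowed VLAN tags is used up.
     */
    struct vlan_entry {
            struct list_head list;
            u16 vid;
    };

    static void program_vlans(struct list_head *vlan_list, u16 max_allowed)
    {
            struct vlan_entry *f;
            u16 count = 0;

            list_for_each_entry(f, vlan_list, list) {
                    if (count == max_allowed)
                            break;          /* allowed VLAN tags exhausted */
                    /* hand f->vid to the hardware/PF here */
                    count++;
            }
    }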

Mitch fixes a variable type: the counter starts out negative and climbs
to zero, so it needs to be a signed integer rather than an unsigned one.
He also increases a timeout to avoid spurious errors, and fixes the
driver to handle the case where the hardware hands us a null receive
descriptor with no data attached that is nonetheless valid.
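The type change matters because the clean routine biases its ring index below zero and lets it climb back toward zero at the wrap point, which only behaves as intended with a signed type; a rough sketch of that pattern (simplified, not the driver's exact code):

    #include <linux/types.h>

    /* Illustrative only: the biased-index walk used when cleaning a ring.
     * 'i' starts at next_to_clean, is offset below zero, and climbs back
     * toward zero -- with a u16 the bias would wrap instead of going negative.
     */
    static u16 advance_clean_index(u16 next_to_clean, u16 ring_count, int budget)
    {
            int i = next_to_clean;          /* signed on purpose */

            i -= ring_count;                /* bias: i is now in [-ring_count, 0) */
            while (budget--) {
                    /* process the descriptor at slot (i + ring_count) here */
                    i++;
                    if (!i)                 /* reached the end of the ring: wrap */
                            i -= ring_count;
            }
            return (u16)(i + ring_count);   /* un-bias before storing back */
    }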

Aleksandr fixes the driver to use GFP_ATOMIC when allocating memory in
atomic context.
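For context, a minimal sketch of why that matters (structure and function names here are hypothetical): an allocation made while holding a spinlock, or in any other atomic context, must not sleep, so GFP_ATOMIC has to be used instead of GFP_KERNEL:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct filter_entry {
            int id;
    };

    /* Illustrative only: kzalloc() with GFP_KERNEL may sleep, which is not
     * allowed while a spinlock is held, so GFP_ATOMIC is required here.
     */
    static struct filter_entry *add_filter(spinlock_t *lock)
    {
            struct filter_entry *f;

            spin_lock(lock);                        /* atomic context begins */
            f = kzalloc(sizeof(*f), GFP_ATOMIC);    /* GFP_KERNEL could schedule */
            if (f)
                    f->id = 0;                      /* set up the entry under the lock */
            spin_unlock(lock);
            return f;
    }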

Avinash updates the driver to fix a calculation error in virtchnl
regarding the valid length.
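The length bug comes from a message struct that already embeds the first element of its trailing array, so only (n - 1) extra elements need to be added to the size; a simplified sketch (the struct names are stand-ins for virtchnl_tc_info and virtchnl_channel_info):

    #include <linux/types.h>

    struct chan_info {                      /* stand-in for virtchnl_channel_info */
            u32 count;
            u32 offset;
    };

    struct tc_info {                        /* stand-in for virtchnl_tc_info */
            u32 num_tc;
            struct chan_info list[1];       /* first element lives inside the struct */
    };

    /* Illustrative only: sizeof(struct tc_info) already covers list[0], so the
     * valid length only adds space for the remaining (num_tc - 1) elements.
     */
    static size_t tc_msg_len(u32 num_tc)
    {
            return sizeof(struct tc_info) +
                   (num_tc - 1) * sizeof(struct chan_info);
    }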

Jakub refactors the command processing in the watchdog state machine to
reduce the length and complexity of the function.  He also declares the
watchdog task as delayed work and uses a dedicated work queue to service
the driver tasks.
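Roughly, that means the watchdog becomes a struct delayed_work that is re-armed on a driver-owned workqueue instead of the system one; a minimal sketch (the names and the 2 second period are illustrative):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static struct workqueue_struct *drv_wq;         /* stands in for iavf_wq */
    static struct delayed_work watchdog_task;

    static void watchdog_fn(struct work_struct *work)
    {
            /* run one pass of the watchdog state machine here, then re-arm
             * on the dedicated queue instead of the system workqueue
             */
            queue_delayed_work(drv_wq, &watchdog_task, msecs_to_jiffies(2000));
    }

    static int watchdog_setup(void)
    {
            drv_wq = alloc_workqueue("drv_wq", WQ_MEM_RECLAIM, 0);
            if (!drv_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&watchdog_task, watchdog_fn);
            queue_delayed_work(drv_wq, &watchdog_task, 0);  /* first run right away */
            return 0;
    }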

Paul updates iavf_process_aq_command() to call the functions needed to
clear the pending cloud filter bits.
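In other words, the command dispatcher checks pending request bits and must clear each one once it has been serviced; a tiny sketch of that shape (the flag and function names here are hypothetical, not the driver's):

    #include <linux/bits.h>
    #include <linux/errno.h>

    #define AQ_ADD_CLOUD_FILTER     BIT(0)          /* hypothetical request bits */
    #define AQ_DEL_CLOUD_FILTER     BIT(1)

    /* Illustrative only: service one pending request per call and clear its
     * bit so the same command is not issued again on the next pass.
     */
    static int process_aq_command(unsigned long *aq_required)
    {
            if (*aq_required & AQ_DEL_CLOUD_FILTER) {
                    /* send the "delete cloud filter" request to the PF here */
                    *aq_required &= ~AQ_DEL_CLOUD_FILTER;
                    return 0;
            }
            if (*aq_required & AQ_ADD_CLOUD_FILTER) {
                    /* send the "add cloud filter" request to the PF here */
                    *aq_required &= ~AQ_ADD_CLOUD_FILTER;
                    return 0;
            }
            return -EAGAIN;                         /* nothing pending */
    }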
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cd4bb2a3 efa14c39
@@ -171,6 +171,7 @@ enum iavf_state_t {
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_RESETTING, /* in reset */
+__IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
__IAVF_DOWN, /* ready, can be opened */
__IAVF_DOWN_PENDING, /* descending, waiting for watchdog */
@@ -216,7 +217,6 @@ struct iavf_cloud_filter {
/* board specific private data structure */
struct iavf_adapter {
-struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
@@ -303,7 +303,7 @@ struct iavf_adapter {
enum iavf_state_t state;
unsigned long crit_section;
-struct work_struct watchdog_task;
+struct delayed_work watchdog_task;
bool netdev_registered;
bool link_up;
enum virtchnl_link_speed link_speed;
@@ -359,6 +359,7 @@ struct iavf_device {
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
extern const char iavf_driver_version[];
+extern struct workqueue_struct *iavf_wq;
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
......
@@ -510,7 +510,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-schedule_work(&adapter->reset_task);
+queue_work(iavf_wq, &adapter->reset_task);
}
}
@@ -622,7 +622,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-schedule_work(&adapter->reset_task);
+queue_work(iavf_wq, &adapter->reset_task);
}
return 0;
......
@@ -190,7 +190,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_ring *tx_ring, int napi_budget)
{
-u16 i = tx_ring->next_to_clean;
+int i = tx_ring->next_to_clean;
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif
+if (!size)
+return;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
+if (!size)
+return NULL;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
@@ -1299,6 +1305,8 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
unsigned int headlen;
struct sk_buff *skb;
+if (!rx_buffer)
+return NULL;
/* prefetch first cache line of first page */
prefetch(va);
#if L1_CACHE_BYTES < 128
@@ -1363,6 +1371,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
+if (!rx_buffer)
+return NULL;
/* prefetch first cache line of first page */
prefetch(va);
#if L1_CACHE_BYTES < 128
@@ -1398,6 +1408,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer)
{
+if (!rx_buffer)
+return;
if (iavf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1509,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
* verified the descriptor has been written back.
*/
dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+break;
size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
-if (!size)
-break;
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1516,7 +1530,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
-rx_buffer->pagecnt_bias++;
+if (rx_buffer)
+rx_buffer->pagecnt_bias++;
break;
}
......
@@ -983,7 +983,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
return;
}
-len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
+len = ((adapter->num_tc - 1) * sizeof(struct virtchnl_channel_info)) +
sizeof(struct virtchnl_tc_info);
vti = kzalloc(len, GFP_KERNEL);
@@ -1238,7 +1238,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-schedule_work(&adapter->reset_task);
+queue_work(iavf_wq, &adapter->reset_task);
}
break;
default:
......