Commit 72ec301a authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbe and igb.

Alexander Duyck (13):
  ixgbe: Initialize q_vector cpu and affinity masks correctly
  ixgbe: Enable jumbo frames support w/ SR-IOV
  ixgbe: Move message handling routines into their own functions
  ixgbe: Add mailbox API version negotiation support to ixgbe PF
  igb: Split Rx timestamping into two separate functions
  igb: Do not use header split, instead receive all frames into a
    single buffer
  igb: Combine post-processing of skb into a single function
  igb: Map entire page and sync half instead of mapping and unmapping
    half pages
  igb: Move rx_buffer related code in Rx cleanup path into separate
    function
  igb: Lock buffer size at 2K even on systems with larger pages
  igb: Combine q_vector and ring allocation into a single function
  igb: Move the calls to set the Tx and Rx queues into igb_open
  igb: Split igb_update_dca into separate Tx and Rx functions

Tushar Dave (1):
  igb: Correcting and improving small packet check and padding
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1b6f0f92 6a05004a
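Before the diff itself, a minimal sketch of the Rx buffer-recycling model these igb patches move to: every frame is received into one half of a DMA-mapped page, the buffer size is locked at 2 KB (IGB_RX_BUFSZ), and a half page is handed back to the ring by flipping its offset while the driver still owns the page. This is a simplified illustration assuming 4 KB pages; the struct and helper names below are hypothetical, only IGB_RX_BUFSZ and page_offset come from the patches.

#include <linux/mm.h>
#include <linux/topology.h>
#include <linux/types.h>

#define IGB_RX_BUFSZ	2048	/* Rx buffer size locked at 2K */

/* cut-down stand-in for the driver's per-descriptor Rx buffer */
struct rx_buffer {
	dma_addr_t dma;			/* mapping covers the whole page */
	struct page *page;
	unsigned int page_offset;	/* which 2K half is currently in use */
};

/*
 * Decide whether the half page just handed to the stack can be given
 * straight back to the hardware (hypothetical helper, 4 KB pages assumed).
 */
static bool rx_buffer_can_reuse(struct rx_buffer *buf)
{
	/* do not recycle pages that live on a remote NUMA node */
	if (page_to_nid(buf->page) != numa_node_id())
		return false;

	/* only safe while the driver holds the sole page reference */
	if (page_count(buf->page) != 1)
		return false;

	/* point the next descriptor at the other 2K half of the page */
	buf->page_offset ^= IGB_RX_BUFSZ;
	return true;
}

When reuse is not possible, the page is simply unmapped and a fresh one is allocated, which is the slow path igb_alloc_rx_buffers() covers further down in the diff.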
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */ /* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
...
@@ -133,8 +133,9 @@ struct vf_data_storage {
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_16384 16384 #define IGB_RXBUFFER_2048 2048
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Tx Descriptors do we need to call netif_wake_queue ? */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16 #define IGB_TX_QUEUE_WAKE 16
@@ -174,11 +175,9 @@ struct igb_tx_buffer {
}; };
struct igb_rx_buffer { struct igb_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
dma_addr_t page_dma; unsigned int page_offset;
u32 page_offset;
}; };
struct igb_tx_queue_stats { struct igb_tx_queue_stats {
@@ -205,22 +204,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */ u8 itr; /* current ITR setting for ring */
}; };
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
int cpu; /* CPU for DCA */
u32 eims_value; /* EIMS mask value */
struct igb_ring_container rx, tx;
struct napi_struct napi;
u16 itr_val;
u8 set_itr;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
};
struct igb_ring { struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */ struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */ struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +215,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */ void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */ unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */ void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */ u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/ u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */ u8 reg_idx; /* physical index of the ring */
u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */ /* everything past this point are written often */
u16 next_to_clean ____cacheline_aligned_in_smp; u16 next_to_clean;
u16 next_to_use; u16 next_to_use;
u16 next_to_alloc;
union { union {
/* TX */ /* TX */
@@ -251,12 +236,30 @@ struct igb_ring {
}; };
/* RX */ /* RX */
struct { struct {
struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats; struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp; struct u64_stats_sync rx_syncp;
}; };
}; };
/* Items past this point are only used during ring alloc / free */ } ____cacheline_internodealigned_in_smp;
dma_addr_t dma; /* phys address of the ring */
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
int cpu; /* CPU for DCA */
u32 eims_value; /* EIMS mask value */
u16 itr_val;
u8 set_itr;
void __iomem *itr_register;
struct igb_ring_container rx, tx;
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
}; };
enum e1000_ring_flags_t { enum e1000_ring_flags_t {
@@ -442,9 +445,20 @@ extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter); extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work); extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb); struct sk_buff *skb);
extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb);
static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
igb_ptp_rx_rgtstamp(q_vector, skb);
}
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd); struct ifreq *ifr, int cmd);
#endif /* CONFIG_IGB_PTP */ #endif /* CONFIG_IGB_PTP */
...
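A side note on the igb.h changes above: struct igb_q_vector now ends in a zero-length ring array, so a vector and its rings come from a single allocation, and the object is released with kfree_rcu() so stats readers that still hold an RCU reference are not caught out. A rough, hypothetical sketch of that allocation pattern (names simplified, not the driver's exact code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ring {
	void *desc;		/* stand-in for the real per-ring state */
};

struct q_vector {
	struct rcu_head rcu;	/* delays the free past RCU readers */
	struct ring ring[0];	/* rings live in the same allocation */
};

static struct q_vector *alloc_q_vector(int ring_count)
{
	size_t size = sizeof(struct q_vector) +
		      ring_count * sizeof(struct ring);

	/* one kzalloc covers the vector and every ring attached to it */
	return kzalloc(size, GFP_KERNEL);
}

static void free_q_vector(struct q_vector *q)
{
	/* readers (e.g. stats collection) may still be walking the rings */
	kfree_rcu(q, rcu);
}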
@@ -37,6 +37,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/highmem.h>
#include "igb.h" #include "igb.h"
@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1); memset(&skb->data[frame_size + 12], 0xAF, 1);
} }
static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
unsigned int frame_size)
{ {
frame_size /= 2; unsigned char *data;
if (*(skb->data + 3) == 0xFF) { bool match = true;
if ((*(skb->data + frame_size + 10) == 0xBE) &&
(*(skb->data + frame_size + 12) == 0xAF)) { frame_size >>= 1;
return 0;
} data = kmap(rx_buffer->page);
}
return 13; if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
kunmap(rx_buffer->page);
return match;
} }
static int igb_clean_test_rings(struct igb_ring *rx_ring, static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc; union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info; struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info; struct igb_tx_buffer *tx_buffer_info;
struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0; u16 rx_ntc, tx_ntc, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */ /* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean; rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */ /* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */ /* sync Rx buffer for CPU read */
dma_unmap_single(rx_ring->dev, dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma, rx_buffer_info->dma,
IGB_RX_HDR_LEN, IGB_RX_BUFSZ,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
/* verify contents of skb */ /* verify contents of skb */
if (!igb_check_lbtest_frame(rx_buffer_info->skb, size)) if (igb_check_lbtest_frame(rx_buffer_info, size))
count++; count++;
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
/* unmap buffer on tx side */ /* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
total_bytes += tx_buffer_info->bytecount;
total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */ /* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
} }
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); netdev_tx_reset_queue(txring_txq(tx_ring));
netdev_tx_completed_queue(txq, total_packets, total_bytes);
/* re-map buffers to ring, store next to clean values */ /* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count); igb_alloc_rx_buffers(rx_ring, count);
...
@@ -534,31 +534,27 @@ static void igb_dump(struct igb_adapter *adapter)
if (staterr & E1000_RXD_STAT_DD) { if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */ /* Descriptor Done */
pr_info("%s[0x%03X] %016llX %016llX -------" pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
"--------- %p%s\n", "RWB", i, "RWB", i,
le64_to_cpu(u0->a), le64_to_cpu(u0->a),
le64_to_cpu(u0->b), le64_to_cpu(u0->b),
buffer_info->skb, next_desc); next_desc);
} else { } else {
pr_info("%s[0x%03X] %016llX %016llX %016llX" pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
" %p%s\n", "R ", i, "R ", i,
le64_to_cpu(u0->a), le64_to_cpu(u0->a),
le64_to_cpu(u0->b), le64_to_cpu(u0->b),
(u64)buffer_info->dma, (u64)buffer_info->dma,
buffer_info->skb, next_desc); next_desc);
if (netif_msg_pktdata(adapter) && if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->skb) { buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
IGB_RX_HDR_LEN, true);
print_hex_dump(KERN_INFO, "", print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, DUMP_PREFIX_ADDRESS,
16, 1, 16, 1,
page_address(buffer_info->page) + page_address(buffer_info->page) +
buffer_info->page_offset, buffer_info->page_offset,
PAGE_SIZE/2, true); IGB_RX_BUFSZ, true);
} }
} }
} }
@@ -656,80 +652,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
} }
} }
static void igb_free_queues(struct igb_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
kfree(adapter->tx_ring[i]);
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
kfree(adapter->rx_ring[i]);
adapter->rx_ring[i] = NULL;
}
adapter->num_rx_queues = 0;
adapter->num_tx_queues = 0;
}
/**
* igb_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
struct igb_ring *ring;
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
adapter->tx_ring[i] = ring;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
/*
* On i350, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
* */
if (adapter->hw.mac.type >= e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
adapter->rx_ring[i] = ring;
}
igb_cache_ring_register(adapter);
return 0;
err:
igb_free_queues(adapter);
return -ENOMEM;
}
/** /**
* igb_write_ivar - configure ivar for given MSI-X vector * igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
@@ -959,6 +881,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
} }
} }
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
* This function frees the memory allocated to the q_vector. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
/*
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
}
/** /**
* igb_free_q_vectors - Free memory allocated for interrupt vectors * igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
@@ -969,17 +920,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
**/ **/
static void igb_free_q_vectors(struct igb_adapter *adapter) static void igb_free_q_vectors(struct igb_adapter *adapter)
{ {
int v_idx; int v_idx = adapter->num_q_vectors;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { adapter->num_tx_queues = 0;
struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; adapter->num_rx_queues = 0;
adapter->q_vector[v_idx] = NULL;
if (!q_vector)
continue;
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
adapter->num_q_vectors = 0; adapter->num_q_vectors = 0;
while (v_idx--)
igb_free_q_vector(adapter, v_idx);
} }
/** /**
@@ -990,7 +938,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
*/ */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{ {
igb_free_queues(adapter);
igb_free_q_vectors(adapter); igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter); igb_reset_interrupt_capability(adapter);
} }
@@ -1001,7 +948,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available * Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel. * capabilities of the hardware and kernel.
**/ **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter) static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{ {
int err; int err;
int numvecs, i; int numvecs, i;
@@ -1038,7 +985,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries, adapter->msix_entries,
numvecs); numvecs);
if (err == 0) if (err == 0)
goto out; return;
igb_reset_interrupt_capability(adapter); igb_reset_interrupt_capability(adapter);
@@ -1068,105 +1015,183 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->num_q_vectors = 1; adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev)) if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI; adapter->flags |= IGB_FLAG_HAS_MSI;
out: }
/* Notify the stack of the (possibly) reduced queue counts. */
rtnl_lock(); static void igb_add_ring(struct igb_ring *ring,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); struct igb_ring_container *head)
err = netif_set_real_num_rx_queues(adapter->netdev, {
adapter->num_rx_queues); head->ring = ring;
rtnl_unlock(); head->count++;
return err;
} }
/** /**
* igb_alloc_q_vectors - Allocate memory for interrupt vectors * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
* @v_count: q_vectors allocated on adapter, used for ring interleaving
* @v_idx: index of vector in adapter struct
* @txr_count: total number of Tx rings to allocate
* @txr_idx: index of first Tx ring to allocate
* @rxr_count: total number of Rx rings to allocate
* @rxr_idx: index of first Rx ring to allocate
* *
* We allocate one q_vector per queue interrupt. If allocation fails we * We allocate one q_vector. If allocation fails we return -ENOMEM.
* return -ENOMEM.
**/ **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter) static int igb_alloc_q_vector(struct igb_adapter *adapter,
int v_count, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{ {
struct igb_q_vector *q_vector; struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw; struct igb_ring *ring;
int v_idx; int ring_count, size;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { /* igb only supports 1 Tx and/or 1 Rx queue per vector */
q_vector = kzalloc(sizeof(struct igb_q_vector), if (txr_count > 1 || rxr_count > 1)
GFP_KERNEL); return -ENOMEM;
ring_count = txr_count + rxr_count;
size = sizeof(struct igb_q_vector) +
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector) if (!q_vector)
goto err_out; return -ENOMEM;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
igb_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter; q_vector->adapter = adapter;
q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize ITR configuration */
q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
q_vector->itr_val = IGB_START_ITR; q_vector->itr_val = IGB_START_ITR;
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector; /* initialize pointer to rings */
ring = q_vector->ring;
if (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
igb_add_ring(ring, &q_vector->tx);
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
/* push pointer to next ring */
ring++;
} }
return 0; if (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
err_out: /* configure backlink on ring */
igb_free_q_vectors(adapter); ring->q_vector = q_vector;
return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, /* update q_vector Rx values */
int ring_idx, int v_idx) igb_add_ring(ring, &q_vector->rx);
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
q_vector->rx.ring = adapter->rx_ring[ring_idx]; /* set flag indicating ring supports SCTP checksum offload */
q_vector->rx.ring->q_vector = q_vector; if (adapter->hw.mac.type >= e1000_82576)
q_vector->rx.count++; set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
q_vector->itr_val = adapter->rx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGB_START_ITR;
}
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, /*
int ring_idx, int v_idx) * On i350, i210, and i211, loopback VLAN packets
{ * have the tag byte-swapped.
struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; * */
if (adapter->hw.mac.type >= e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
q_vector->tx.ring = adapter->tx_ring[ring_idx]; /* apply Rx specific ring traits */
q_vector->tx.ring->q_vector = q_vector; ring->count = adapter->rx_ring_count;
q_vector->tx.count++; ring->queue_index = rxr_idx;
q_vector->itr_val = adapter->tx_itr_setting;
q_vector->tx.work_limit = adapter->tx_work_limit; /* assign ring to adapter */
if (q_vector->itr_val && q_vector->itr_val <= 3) adapter->rx_ring[rxr_idx] = ring;
q_vector->itr_val = IGB_START_ITR; }
return 0;
} }
/** /**
* igb_map_ring_to_vector - maps allocated queues to vectors * igb_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
* *
* This function maps the recently allocated queues to vectors. * We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/ **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter) static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{ {
int i; int q_vectors = adapter->num_q_vectors;
int v_idx = 0; int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
int err;
if ((adapter->num_q_vectors < adapter->num_rx_queues) || if (q_vectors >= (rxr_remaining + txr_remaining)) {
(adapter->num_q_vectors < adapter->num_tx_queues)) for (; rxr_remaining; v_idx++) {
return -ENOMEM; err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
0, 0, 1, rxr_idx);
if (adapter->num_q_vectors >= if (err)
(adapter->num_rx_queues + adapter->num_tx_queues)) { goto err_out;
for (i = 0; i < adapter->num_rx_queues; i++)
igb_map_rx_ring_to_vector(adapter, i, v_idx++); /* update counts and index */
for (i = 0; i < adapter->num_tx_queues; i++) rxr_remaining--;
igb_map_tx_ring_to_vector(adapter, i, v_idx++); rxr_idx++;
} else { }
for (i = 0; i < adapter->num_rx_queues; i++) {
if (i < adapter->num_tx_queues)
igb_map_tx_ring_to_vector(adapter, i, v_idx);
igb_map_rx_ring_to_vector(adapter, i, v_idx++);
} }
for (; i < adapter->num_tx_queues; i++)
igb_map_tx_ring_to_vector(adapter, i, v_idx++); for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
txr_remaining -= tqpv;
rxr_idx++;
txr_idx++;
} }
return 0; return 0;
err_out:
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
while (v_idx--)
igb_free_q_vector(adapter, v_idx);
return -ENOMEM;
} }
/** /**
@@ -1179,9 +1204,7 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
int err; int err;
err = igb_set_interrupt_capability(adapter); igb_set_interrupt_capability(adapter);
if (err)
return err;
err = igb_alloc_q_vectors(adapter); err = igb_alloc_q_vectors(adapter);
if (err) { if (err) {
@@ -1189,24 +1212,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
goto err_alloc_q_vectors; goto err_alloc_q_vectors;
} }
err = igb_alloc_queues(adapter); igb_cache_ring_register(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
err = igb_map_ring_to_vector(adapter);
if (err) {
dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
goto err_map_queues;
}
return 0; return 0;
err_map_queues:
igb_free_queues(adapter);
err_alloc_queues:
igb_free_q_vectors(adapter);
err_alloc_q_vectors: err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter); igb_reset_interrupt_capability(adapter);
return err; return err;
@@ -1229,11 +1238,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err) if (!err)
goto request_done; goto request_done;
/* fall back to MSI */ /* fall back to MSI */
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
igb_clear_interrupt_scheme(adapter); igb_clear_interrupt_scheme(adapter);
if (!pci_enable_msi(pdev)) if (!pci_enable_msi(pdev))
adapter->flags |= IGB_FLAG_HAS_MSI; adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
adapter->num_tx_queues = 1; adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1; adapter->num_rx_queues = 1;
adapter->num_q_vectors = 1; adapter->num_q_vectors = 1;
@@ -1243,13 +1252,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
"Unable to allocate memory for vectors\n"); "Unable to allocate memory for vectors\n");
goto request_done; goto request_done;
} }
err = igb_alloc_queues(adapter);
if (err) {
dev_err(&pdev->dev,
"Unable to allocate memory for queues\n");
igb_free_q_vectors(adapter);
goto request_done;
}
igb_setup_all_tx_resources(adapter); igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter); igb_setup_all_rx_resources(adapter);
} }
@@ -2531,6 +2533,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
if (err) if (err)
goto err_req_irq; goto err_req_irq;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_queues);
if (err)
goto err_set_queues;
err = netif_set_real_num_rx_queues(adapter->netdev,
adapter->num_rx_queues);
if (err)
goto err_set_queues;
/* From here on the code is the same as igb_up() */ /* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state); clear_bit(__IGB_DOWN, &adapter->state);
@@ -2560,6 +2573,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return 0; return 0;
err_set_queues:
igb_free_irq(adapter);
err_req_irq: err_req_irq:
igb_release_hw_control(adapter); igb_release_hw_control(adapter);
igb_power_down_link(adapter); igb_power_down_link(adapter);
@@ -2637,10 +2652,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
tx_ring->size, &tx_ring->dma, GFP_KERNEL);
&tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) if (!tx_ring->desc)
goto err; goto err;
@@ -2777,18 +2790,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info) if (!rx_ring->rx_buffer_info)
goto err; goto err;
/* Round up to nearest 4K */ /* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
rx_ring->size, &rx_ring->dma, GFP_KERNEL);
&rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) if (!rx_ring->desc)
goto err; goto err;
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0; rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0; rx_ring->next_to_use = 0;
@@ -3106,12 +3117,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */ /* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
#else
srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
#ifdef CONFIG_IGB_PTP #ifdef CONFIG_IGB_PTP
if (hw->mac.type >= e1000_82580) if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP; srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3305,36 +3312,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
unsigned long size; unsigned long size;
u16 i; u16 i;
if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
if (!rx_ring->rx_buffer_info) if (!rx_ring->rx_buffer_info)
return; return;
/* Free all the Rx ring sk_buffs */ /* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) { for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
if (buffer_info->dma) {
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) { if (!buffer_info->page)
dev_kfree_skb(buffer_info->skb); continue;
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
dma_unmap_page(rx_ring->dev, dma_unmap_page(rx_ring->dev,
buffer_info->page_dma, buffer_info->dma,
PAGE_SIZE / 2, PAGE_SIZE,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
buffer_info->page_dma = 0; __free_page(buffer_info->page);
}
if (buffer_info->page) {
put_page(buffer_info->page);
buffer_info->page = NULL; buffer_info->page = NULL;
buffer_info->page_offset = 0;
}
} }
size = sizeof(struct igb_rx_buffer) * rx_ring->count; size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3341,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
/* Zero out the descriptor ring */ /* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size); memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0; rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0; rx_ring->next_to_use = 0;
} }
@@ -4467,10 +4466,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* The minimum packet size with TCTL.PSP set is 17 so pad the skb * The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement. * in order to meet this minimum size requirement.
*/ */
if (skb->len < 17) { if (unlikely(skb->len < 17)) {
if (skb_padto(skb, 17)) if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK; return NETDEV_TX_OK;
skb->len = 17; skb->len = 17;
skb_set_tail_pointer(skb, 17);
} }
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4851,56 +4851,74 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
} }
#ifdef CONFIG_IGB_DCA #ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector) static void igb_update_tx_dca(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
int cpu)
{ {
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu(); u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
if (q_vector->cpu == cpu) if (hw->mac.type != e1000_82575)
goto out_no_update; txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
if (q_vector->tx.ring) { /*
int q = q_vector->tx.ring->reg_idx; * We can enable relaxed ordering for reads, but not writes when
u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); * DCA is enabled. This is due to a known issue in some chipsets
if (hw->mac.type == e1000_82575) { * which will cause the DCA tag to be cleared.
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; */
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
} else { E1000_DCA_TXCTRL_DATA_RRO_EN |
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; E1000_DCA_TXCTRL_DESC_DCA_EN;
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_TXCTRL_CPUID_SHIFT; wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
}
if (q_vector->rx.ring) {
int q = q_vector->rx.ring->reg_idx;
u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82575) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
} else {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_RXCTRL_CPUID_SHIFT;
}
dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
}
q_vector->cpu = cpu;
out_no_update:
put_cpu();
} }
static void igb_setup_dca(struct igb_adapter *adapter) static void igb_update_rx_dca(struct igb_adapter *adapter,
struct igb_ring *rx_ring,
int cpu)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int i; u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) if (hw->mac.type != e1000_82575)
rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
/*
* We can enable relaxed ordering for reads, but not writes when
* DCA is enabled. This is due to a known issue in some chipsets
* which will cause the DCA tag to be cleared.
*/
rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
E1000_DCA_RXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
if (q_vector->tx.ring)
igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
if (q_vector->rx.ring)
igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
q_vector->cpu = cpu;
out_no_update:
put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int i;
if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
return; return;
/* Always use CB2 mode, difference is masked in the CB driver. */ /* Always use CB2 mode, difference is masked in the CB driver. */
@@ -5840,6 +5858,183 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return !!budget; return !!budget;
} }
/**
* igb_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
**/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *old_buff)
{
struct igb_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_buffer_info[nta];
/* update, and store next to alloc */
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
old_buff->page_offset,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
}
/**
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
#ifdef CONFIG_IGB_PTP
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
va += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
}
#endif
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* we can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(page) == numa_node_id()))
return true;
/* this page cannot be reused so discard it */
put_page(page);
return false;
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rx_buffer->page_offset, size, IGB_RX_BUFSZ);
/* avoid re-using remote pages */
if (unlikely(page_to_nid(page) != numa_node_id()))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1))
return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
/*
* since we are the only owner of the page and we need to
* increment it, just set the value to 2 in order to avoid
* an unnecessary locked operation
*/
atomic_set(&page->_count, 2);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += SKB_DATA_ALIGN(size);
if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
return false;
/* bump ref count on page before it is given to the stack */
get_page(page);
#endif
return true;
}
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct igb_rx_buffer *rx_buffer;
struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
/*
* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
*/
rmb();
page = rx_buffer->page;
prefetchw(page);
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
/* prefetch first cache line of first page */
prefetch(page_addr);
#if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
IGB_RX_HDR_LEN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
}
/*
* we will be copying header into skb->data in
* pskb_may_pull so it is in our interest to prefetch
* it now to avoid a possible cache miss
*/
prefetchw(skb->data);
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
/* pull page into skb */
if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
}
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
return skb;
}
static inline void igb_rx_checksum(struct igb_ring *ring, static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc, union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb)
@@ -5889,147 +6084,342 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
} }
static void igb_rx_vlan(struct igb_ring *ring, /**
union e1000_adv_rx_desc *rx_desc, * igb_is_non_eop - process handling of non-EOP buffers
struct sk_buff *skb) * @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
**/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc)
{ {
if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { u32 ntc = rx_ring->next_to_clean + 1;
u16 vid;
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid); /* fetch, update, and store next to clean */
} ntc = (ntc < rx_ring->count) ? ntc : 0;
} rx_ring->next_to_clean = ntc;
static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) prefetch(IGB_RX_DESC(rx_ring, ntc));
{
/* HW will not DMA in data larger than the given buffer, even if it if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
* parses the (NFS, of course) header to be larger. In that case, it return false;
* fills the header buffer and spills the rest into the page.
*/ return true;
u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
if (hlen > IGB_RX_HDR_LEN)
hlen = IGB_RX_HDR_LEN;
return hlen;
} }
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) /**
{ * igb_get_headlen - determine size of header for LRO/GRO
struct igb_ring *rx_ring = q_vector->rx.ring; * @data: pointer to the start of the headers
union e1000_adv_rx_desc *rx_desc; * @max_len: total length of section to find headers in
const int current_node = numa_node_id(); *
unsigned int total_bytes = 0, total_packets = 0; * This function is meant to determine the length of headers that will
u16 cleaned_count = igb_desc_unused(rx_ring); * be recognized by hardware for LRO, and GRO offloads. The main
u16 i = rx_ring->next_to_clean; * motivation of doing this is to only perform one pull for IPv4 TCP
* packets so that we can do basic things like calculating the gso_size
* based on the average data per packet.
**/
static unsigned int igb_get_headlen(unsigned char *data,
unsigned int max_len)
{
union {
unsigned char *network;
/* l2 headers */
struct ethhdr *eth;
struct vlan_hdr *vlan;
/* l3 headers */
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
} hdr;
__be16 protocol;
u8 nexthdr = 0; /* default to not TCP */
u8 hlen;
/* this should never happen, but better safe than sorry */
if (max_len < ETH_HLEN)
return max_len;
/* initialize network frame pointer */
hdr.network = data;
/* set first protocol and move network header forward */
protocol = hdr.eth->h_proto;
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
if (protocol == __constant_htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
protocol = hdr.vlan->h_vlan_encapsulated_proto;
hdr.network += VLAN_HLEN;
}
/* handle L3 protocols */
if (protocol == __constant_htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
/* access ihl as a u8 to avoid unaligned access on ia64 */
hlen = (hdr.network[0] & 0x0F) << 2;
/* verify hlen meets minimum size requirements */
if (hlen < sizeof(struct iphdr))
return hdr.network - data;
/* record next protocol */
nexthdr = hdr.ipv4->protocol;
hdr.network += hlen;
} else if (protocol == __constant_htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
/* record next protocol */
nexthdr = hdr.ipv6->nexthdr;
hdr.network += sizeof(struct ipv6hdr);
} else {
return hdr.network - data;
}
rx_desc = IGB_RX_DESC(rx_ring, i); /* finally sort out TCP */
if (nexthdr == IPPROTO_TCP) {
if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
return max_len;
while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { /* access doff as a u8 to avoid unaligned access on ia64 */
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; hlen = (hdr.network[12] & 0xF0) >> 2;
struct sk_buff *skb = buffer_info->skb;
union e1000_adv_rx_desc *next_rxd;
buffer_info->skb = NULL; /* verify hlen meets minimum size requirements */
prefetch(skb->data); if (hlen < sizeof(struct tcphdr))
return hdr.network - data;
i++; hdr.network += hlen;
if (i == rx_ring->count) } else if (nexthdr == IPPROTO_UDP) {
i = 0; if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
return max_len;
next_rxd = IGB_RX_DESC(rx_ring, i); hdr.network += sizeof(struct udphdr);
prefetch(next_rxd); }
/* /*
* This memory barrier is needed to keep us from reading * If everything has gone correctly hdr.network should be the
* any other fields out of the rx_desc until we know the * data section of the packet and will be the end of the header.
* RXD_STAT_DD bit is set * If not then it probably represents the end of the last recognized
* header.
*/ */
rmb(); if ((hdr.network - data) < max_len)
return hdr.network - data;
if (!skb_is_nonlinear(skb)) { else
__skb_put(skb, igb_get_hlen(rx_desc)); return max_len;
dma_unmap_single(rx_ring->dev, buffer_info->dma, }
IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (rx_desc->wb.upper.length) { /**
u16 length = le16_to_cpu(rx_desc->wb.upper.length); * igb_pull_tail - igb specific version of skb_pull_tail
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being adjusted
*
* This function is an igb specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
* As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb.
*/
static void igb_pull_tail(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, /*
buffer_info->page, * it is valid to use page_address instead of kmap since we are
buffer_info->page_offset, * working with pages allocated out of the lomem pool per
length); * alloc_page(GFP_ATOMIC)
*/
va = skb_frag_address(frag);
skb->len += length; #ifdef CONFIG_IGB_PTP
skb->data_len += length; if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
skb->truesize += PAGE_SIZE / 2; /* retrieve timestamp from buffer */
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
if ((page_count(buffer_info->page) != 1) || /* update pointers to remove timestamp header */
(page_to_nid(buffer_info->page) != current_node)) skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
buffer_info->page = NULL; frag->page_offset += IGB_TS_HDR_LEN;
else skb->data_len -= IGB_TS_HDR_LEN;
get_page(buffer_info->page); skb->len -= IGB_TS_HDR_LEN;
dma_unmap_page(rx_ring->dev, buffer_info->page_dma, /* move va to start of packet data */
PAGE_SIZE / 2, DMA_FROM_DEVICE); va += IGB_TS_HDR_LEN;
buffer_info->page_dma = 0;
} }
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) { #endif
struct igb_rx_buffer *next_buffer; /*
next_buffer = &rx_ring->rx_buffer_info[i]; * we need the header to contain the greater of either ETH_HLEN or
buffer_info->skb = next_buffer->skb; * 60 bytes if the skb->len is less than 60 for skb_pad.
buffer_info->dma = next_buffer->dma; */
next_buffer->skb = skb; pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
next_buffer->dma = 0;
goto next_desc; /* align pull length to size of long to optimize memcpy performance */
} skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
frag->page_offset += pull_len;
skb->data_len -= pull_len;
skb->tail += pull_len;
}
/**
* igb_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
*
* Address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
*
* In addition if skb is not at least 60 bytes we need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
*
* Returns true if an error was encountered and skb was freed.
**/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
if (unlikely((igb_test_staterr(rx_desc, if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)) E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
&& !(rx_ring->netdev->features & NETIF_F_RXALL))) { struct net_device *netdev = rx_ring->netdev;
if (!(netdev->features & NETIF_F_RXALL)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
goto next_desc; return true;
}
} }
#ifdef CONFIG_IGB_PTP /* place header in linear portion of buffer */
igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb); if (skb_is_nonlinear(skb))
#endif /* CONFIG_IGB_PTP */ igb_pull_tail(rx_ring, rx_desc, skb);
/* if skb_pad returns an error the skb was freed */
if (unlikely(skb->len < 60)) {
int pad_len = 60 - skb->len;
if (skb_pad(skb, pad_len))
return true;
__skb_put(skb, pad_len);
}
return false;
}
/**
* igb_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
igb_rx_hash(rx_ring, rx_desc, skb); igb_rx_hash(rx_ring, rx_desc, skb);
igb_rx_checksum(rx_ring, rx_desc, skb); igb_rx_checksum(rx_ring, rx_desc, skb);
igb_rx_vlan(rx_ring, rx_desc, skb);
total_bytes += skb->len; #ifdef CONFIG_IGB_PTP
total_packets++; igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
#endif /* CONFIG_IGB_PTP */
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
napi_gro_receive(&q_vector->napi, skb); static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
budget--; do {
next_desc: union e1000_adv_rx_desc *rx_desc;
if (!budget)
break;
cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) { if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers(rx_ring, cleaned_count); igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0; cleaned_count = 0;
} }
/* use prefetched values */ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
rx_desc = next_rxd;
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
/* retrieve a buffer from the ring */
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
cleaned_count++;
/* fetch next buffer in frame if non-eop */
if (igb_is_non_eop(rx_ring, rx_desc))
continue;
/* verify the packet layout is correct */
if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
} }
rx_ring->next_to_clean = i; /* probably a little skewed due to removing CRC */
total_bytes += skb->len;
/* populate checksum, timestamp, VLAN, and protocol */
igb_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&q_vector->napi, skb);
/* reset skb pointer */
skb = NULL;
/* update budget accounting */
total_packets++;
} while (likely(total_packets < budget));
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->rx_syncp); u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes; rx_ring->rx_stats.bytes += total_bytes;
@@ -6040,73 +6430,44 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
if (cleaned_count) if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count); igb_alloc_rx_buffers(rx_ring, cleaned_count);
return !!budget; return (total_packets < budget);
} }
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                                  struct igb_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

        /*
         * if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);

                rx_ring->rx_stats.alloc_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;

        return true;
}
@@ -6120,22 +6481,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
        struct igb_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;

        /* nothing to do */
        if (!cleaned_count)
                return;

        rx_desc = IGB_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!igb_alloc_mapped_page(rx_ring, bi))
                        break;

                /*
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
@@ -6148,17 +6510,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, rx_ring->tail);
        }
@@ -6492,7 +6862,9 @@ static int igb_resume(struct device *dev)
        wr32(E1000_WUS, ~0);

        if (netdev->flags & IFF_UP) {
                rtnl_lock();
                err = __igb_open(netdev, true);
                rtnl_unlock();
                if (err)
                        return err;
        }
......
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
}

/**
 * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
 * @q_vector: Pointer to interrupt specific structure
 * @va: Pointer to address containing Rx buffer
 * @skb: Buffer containing timestamp and packet
 *
 * This function is meant to retrieve a timestamp from the first buffer of an
 * incoming frame.  The value is stored in little endian format starting on
 * byte 8.
 */
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
                         unsigned char *va,
                         struct sk_buff *skb)
{
        u64 *regval = (u64 *)va;

        /*
         * The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
         * Field: Reserved Reserved SYSTIML  SYSTIMH
         */
        igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
                                   le64_to_cpu(regval[1]));
}
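For reference, the layout described in the comment above can also be read out DWORD by DWORD. The snippet below is only an illustrative sketch of the same extraction (it is not part of this patch); the old per-descriptor path did the equivalent with le32_to_cpu on the buffer words:

        /* Illustrative only: assemble the same 64-bit SYSTIM value from the
         * two DWORDs named in the comment above, SYSTIML at bytes 8-11 and
         * SYSTIMH at bytes 12-15 of the first Rx buffer.
         */
        u32 systiml = le32_to_cpu(((__le32 *)va)[2]);
        u32 systimh = le32_to_cpu(((__le32 *)va)[3]);
        u64 systim  = ((u64)systimh << 32) | systiml;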
/**
 * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
 * @q_vector: Pointer to interrupt specific structure
 * @skb: Buffer containing timestamp and packet
 *
 * This function is meant to retrieve a timestamp from the internal registers
 * of the adapter and store it in the skb.
 */
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
                         struct sk_buff *skb)
{
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u64 regval;

        /*
         * If this bit is set, then the RX registers contain the time stamp. No
         * other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
         * If nothing went wrong, then it should have a shared tx_flags that we
         * can turn into a skb_shared_hwtstamps.
         */
        if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
                return;

        regval = rd32(E1000_RXSTMPL);
        regval |= (u64)rd32(E1000_RXSTMPH) << 32;

        igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
......
@@ -135,6 +135,7 @@ struct vf_data_storage {
        u16 tx_rate;
        u16 vlan_count;
        u8 spoofchk_enabled;
        unsigned int vf_api;
};

struct vf_macvlans {
......
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
                return -EINVAL;

        e_info(drv, "Enabling FCoE offload features.\n");

        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
......
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
        /* setup affinity mask and node */
        if (cpu != -1)
                cpumask_set_cpu(cpu, &q_vector->affinity_mask);
        q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
        /* initialize CPU for DCA */
        q_vector->cpu = -1;

#endif
        /* initialize NAPI */
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbe_poll, 64);
......
@@ -3263,6 +3263,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */

        /* adjust max frame to be at least the size of a standard frame */
        if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
                max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -4828,14 +4833,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;

        /*
         * For 82599EB we cannot allow legacy VFs to enable their receive
         * paths when MTU greater than 1500 is configured.  So display a
         * warning that legacy VFs will be disabled.
         */
        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
            (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
            (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
                e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");

        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
......
@@ -62,12 +62,29 @@
/* bits 23:16 are used for exra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)

/* definitions to support mailbox API version negotiation */

/*
 * Each element denotes a version of the API; existing numbers may not
 * change; any additions must go at the end
 */
enum ixgbe_pfvf_api_rev {
        ixgbe_mbox_api_10,      /* API version 1.0, linux/freebsd VF driver */
        ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
        /* This value should always be last */
        ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};

/* mailbox API, legacy requests */
#define IXGBE_VF_RESET            0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */

/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE    0x08 /* negotiate API version */

/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
......
@@ -150,16 +150,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
        adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                             IXGBE_FLAG2_RSC_ENABLED);

        /* enable spoof checking for all VFs */
        for (i = 0; i < adapter->num_vfs; i++)
                adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +255,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                                   u32 *msgbuf, u32 vf)
{
        int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                       >> IXGBE_VT_MSGINFO_SHIFT;
        u16 *hash_list = (u16 *)&msgbuf[1];
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
@@ -353,31 +346,77 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}

static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = msgbuf[1];
        u32 max_frs;

        /*
         * For 82599EB we have to keep all PFs and VFs operating with
         * the same max_frame value in order to avoid sending an oversize
         * frame to a VF.  In order to guarantee this is handled correctly
         * for all cases we have several special exceptions to take into
         * account before we can enable the VF for receive
         */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                struct net_device *dev = adapter->netdev;
                int pf_max_frame = dev->mtu + ETH_HLEN;
                u32 reg_offset, vf_shift, vfre;
                s32 err = 0;

#ifdef CONFIG_FCOE
                if (dev->features & NETIF_F_FCOE_MTU)
                        pf_max_frame = max_t(int, pf_max_frame,
                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
                /*
                 * If the PF or VF are running w/ jumbo frames enabled we
                 * need to shut down the VF Rx path as we cannot support
                 * jumbo frames on legacy VFs
                 */
                if ((pf_max_frame > ETH_FRAME_LEN) ||
                    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
                        err = -EINVAL;

                /* determine VF receive enable location */
                vf_shift = vf % 32;
                reg_offset = vf / 32;

                /* enable or disable receive depending on error */
                vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
                if (err)
                        vfre &= ~(1 << vf_shift);
                else
                        vfre |= 1 << vf_shift;
                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

                if (err) {
                        e_err(drv, "VF max_frame %d out of range\n", max_frame);
                        return err;
                }
        }

        /* MTU < 68 is an error and causes problems on some kernels */
        if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
                e_err(drv, "VF max_frame %d out of range\n", max_frame);
                return -EINVAL;
        }

        /* pull current max frame size from hardware */
        max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
        max_frs &= IXGBE_MHADD_MFS_MASK;
        max_frs >>= IXGBE_MHADD_MFS_SHIFT;

        if (max_frs < max_frame) {
                max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }

        e_info(hw, "VF requests change max MTU to %d\n", max_frame);

        return 0;
}
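The 82599EB check above boils down to a frame-size comparison against the standard Ethernet constants from linux/if_ether.h (ETH_FRAME_LEN = 1514, ETH_FCS_LEN = 4). The following standalone helper is a hypothetical restatement of that rule, included only as a sketch and not part of the driver:

/* Hypothetical restatement of the legacy-VF frame-size rule enforced above */
static bool legacy_vf_rx_allowed(int pf_max_frame, int vf_max_frame)
{
        /* the PF must not be running jumbo frames and the VF may ask for at
         * most a standard frame plus FCS (1514 + 4 = 1518 bytes)
         */
        return pf_max_frame <= ETH_FRAME_LEN &&
               vf_max_frame <= ETH_FRAME_LEN + ETH_FCS_LEN;
}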
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -430,6 +469,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        ixgbe_set_rx_mode(adapter->netdev);

        hw->mac.ops.clear_rar(hw, rar_entry);

        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +563,179 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
        return 0;
}

static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
        u32 reg, msgbuf[4];
        u32 reg_offset, vf_shift;
        u8 *addr = (u8 *)(&msgbuf[1]);

        e_info(probe, "VF Reset msg received from vf %d\n", vf);

        /* reset the filters for the device */
        ixgbe_vf_reset_event(adapter, vf);

        /* set vf mac address */
        ixgbe_set_vf_mac(adapter, vf, vf_mac);

        vf_shift = vf % 32;
        reg_offset = vf / 32;

        /* enable transmit for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
        reg |= 1 << vf_shift;
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

        /* enable receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
        reg |= 1 << vf_shift;

        /*
         * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
         * For more info take a look at ixgbe_set_vf_lpe
         */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                struct net_device *dev = adapter->netdev;
                int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
                if (dev->features & NETIF_F_FCOE_MTU)
                        pf_max_frame = max_t(int, pf_max_frame,
                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
                if (pf_max_frame > ETH_FRAME_LEN)
                        reg &= ~(1 << vf_shift);
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

        /* enable VF mailbox for further messages */
        adapter->vfinfo[vf].clear_to_send = true;

        /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

        /* reply to reset with ack and vf mac address */
        msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
        memcpy(addr, vf_mac, ETH_ALEN);

        /*
         * Piggyback the multicast filter type so VF can compute the
         * correct vectors
         */
        msgbuf[3] = hw->mac.mc_filter_type;
        ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

        return 0;
}
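The register indexing above spreads the VFs across 32-bit enable registers (VFTE/VFRE). A quick worked example of the arithmetic, with purely illustrative values:

        /* Illustrative only: locating VF 37 in the 32-bit enable registers */
        u32 vf = 37;
        u32 reg_offset = vf / 32;       /* = 1, the second VFTE/VFRE register */
        u32 vf_shift = vf % 32;         /* = 5, so the enable mask is 1 << 5 = 0x20 */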
static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
        u8 *new_mac = ((u8 *)(&msgbuf[1]));

        if (!is_valid_ether_addr(new_mac)) {
                e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
                return -1;
        }

        if (adapter->vfinfo[vf].pf_set_mac &&
            memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
                   ETH_ALEN)) {
                e_warn(drv,
                       "VF %d attempted to override administratively set MAC address\n"
                       "Reload the VF driver to resume operations\n",
                       vf);
                return -1;
        }

        return ixgbe_set_vf_mac(adapter, vf, new_mac);
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
        int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
        int err;

        if (adapter->vfinfo[vf].pf_vlan) {
                e_warn(drv,
                       "VF %d attempted to override administratively set VLAN configuration\n"
                       "Reload the VF driver to resume operations\n",
                       vf);
                return -1;
        }

        if (add)
                adapter->vfinfo[vf].vlan_count++;
        else if (adapter->vfinfo[vf].vlan_count)
                adapter->vfinfo[vf].vlan_count--;

        err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
        if (!err && adapter->vfinfo[vf].spoofchk_enabled)
                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);

        return err;
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
                                    u32 *msgbuf, u32 vf)
{
        u8 *new_mac = ((u8 *)(&msgbuf[1]));
        int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                    IXGBE_VT_MSGINFO_SHIFT;
        int err;

        if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
                e_warn(drv,
                       "VF %d requested MACVLAN filter but is administratively denied\n",
                       vf);
                return -1;
        }

        /* An non-zero index indicates the VF is setting a filter */
        if (index) {
                if (!is_valid_ether_addr(new_mac)) {
                        e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
                        return -1;
                }

                /*
                 * If the VF is allowed to set MAC filters then turn off
                 * anti-spoofing to avoid false positives.
                 */
                if (adapter->vfinfo[vf].spoofchk_enabled)
                        ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
        }

        err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
        if (err == -ENOSPC)
                e_warn(drv,
                       "VF %d has requested a MACVLAN filter but there is no space for it\n",
                       vf);

        return err;
}

static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
                                  u32 *msgbuf, u32 vf)
{
        int api = msgbuf[1];

        switch (api) {
        case ixgbe_mbox_api_10:
                adapter->vfinfo[vf].vf_api = api;
                return 0;
        default:
                break;
        }

        e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

        return -1;
}
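For context, the VF side of this negotiation is not part of this series; a VF driver would presumably build a request along these lines, using the mailbox constants defined earlier in ixgbe_mbx.h. This is a hypothetical sketch only:

        /* Hypothetical VF-side request: message type in word 0, desired API
         * revision in word 1; the PF handler above ACKs or NACKs the result.
         */
        u32 msgbuf[2];

        msgbuf[0] = IXGBE_VF_API_NEGOTIATE;
        msgbuf[1] = ixgbe_mbox_api_10;
        /* the VF would then write msgbuf to its mailbox and poll for the reply */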
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +744,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        struct ixgbe_hw *hw = &adapter->hw;
        s32 retval;

        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +759,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        /* flush the ack before we write any messages back */
        IXGBE_WRITE_FLUSH(hw);

        if (msgbuf[0] == IXGBE_VF_RESET)
                return ixgbe_vf_reset_msg(adapter, vf);

        /*
         * until the vf completes a virtual function reset it should not be
         * allowed to start any configuration.
         */
        if (!adapter->vfinfo[vf].clear_to_send) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
                ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +774,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        switch ((msgbuf[0] & 0xFFFF)) {
        case IXGBE_VF_SET_MAC_ADDR:
                retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_MULTICAST:
                retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_VLAN:
                retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_LPE:
                retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_MACVLAN:
                retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_API_NEGOTIATE:
                retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
                break;
        default:
                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +805,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

        return retval;
}
......