Commit 2a47fa45 authored by John Fastabend, committed by David S. Miller

ixgbe: enable l2 forwarding acceleration for macvlans

Now that l2 acceleration ops are in place from the prior patch,
enable ixgbe to take advantage of these operations.  Allow it to
allocate queues for a macvlan so that when we transmit a frame,
we can do the switching in hardware inside the ixgbe card, rather
than in software.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a6cc0cfa
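
For context, the companion patch (a6cc0cfa) adds the layer 2 forwarding ("dfwd") hooks to net_device_ops that this commit implements for ixgbe. The sketch below is illustrative only, not code from this patch: the handler names mirror the ones the commit adds in ixgbe_main.c (which this excerpt does not show), and the op signatures should be treated as approximate.

#include <linux/netdevice.h>

/* Illustrative only: accept an upper macvlan, reserve a VMDq pool and a
 * block of tx/rx queues for it, and return a driver-private cookie that
 * the delete and transmit paths get back later.
 */
static void *ixgbe_fwd_add_sketch(struct net_device *pdev,
				  struct net_device *vdev)
{
	/* the real handler would allocate a struct ixgbe_fwd_adapter here */
	return NULL;	/* placeholder: NULL/ERR_PTR means no offload */
}

static void ixgbe_fwd_del_sketch(struct net_device *pdev, void *priv)
{
	/* the real handler would release the pool and queues tied to priv */
}

static const struct net_device_ops ixgbe_netdev_ops_sketch = {
	/* ...existing ixgbe ops... */
	.ndo_dfwd_add_station	= ixgbe_fwd_add_sketch,
	.ndo_dfwd_del_station	= ixgbe_fwd_del_sketch,
};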
@@ -223,6 +223,15 @@ enum ixgbe_ring_state_t {
 	__IXGBE_RX_FCOE,
 };
 
+struct ixgbe_fwd_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct net_device *netdev;
+	struct ixgbe_adapter *real_adapter;
+	unsigned int tx_base_queue;
+	unsigned int rx_base_queue;
+	int pool;
+};
+
 #define check_for_tx_hang(ring) \
 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
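
The ixgbe_fwd_adapter struct added above is the per-macvlan state handed back as the add-station cookie: which upper netdev is offloaded, which PF adapter owns it, which VMDq pool it received, and where that pool's tx/rx queues start. A minimal sketch of how such a handler might fill it in; the helper name and exact field usage are assumptions, since the real setup code is in ixgbe_main.c and not shown in this excerpt:

#include "ixgbe.h"
#include <linux/slab.h>

static struct ixgbe_fwd_adapter *
fwd_adapter_bind_sketch(struct ixgbe_adapter *adapter,
			struct net_device *vdev, int pool)
{
	struct ixgbe_fwd_adapter *fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);

	if (!fwd)
		return NULL;

	fwd->netdev = vdev;		/* macvlan being offloaded */
	fwd->real_adapter = adapter;	/* owning PF adapter */
	fwd->pool = pool;		/* reserved VMDq pool index */
	/* each pool owns a contiguous block of hardware queues */
	fwd->tx_base_queue = pool * adapter->num_rx_queues_per_pool;
	fwd->rx_base_queue = pool * adapter->num_rx_queues_per_pool;
	return fwd;
}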
@@ -240,6 +249,7 @@ struct ixgbe_ring {
 	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 	struct net_device *netdev;	/* netdev ring belongs to */
 	struct device *dev;		/* device for DMA mapping */
+	struct ixgbe_fwd_adapter *l2_accel_priv;
 	void *desc;			/* descriptor ring memory */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
@@ -297,6 +307,12 @@ enum ixgbe_ring_f_enum {
 #define IXGBE_MAX_FCOE_INDICES  8
 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS	31
+#define IXGBE_MAX_DCBMACVLANS	8
+
 struct ixgbe_ring_feature {
 	u16 limit;	/* upper limit on feature indices */
 	u16 indices;	/* current value of indices */
@@ -766,6 +782,7 @@ struct ixgbe_adapter {
 #endif /*CONFIG_DEBUG_FS*/
 
 	u8 default_up;
+	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 };
 
 struct ixgbe_fdir_filter {
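
fwd_bitmask is the allocator state for those pools: presumably bit 0 stays set for the PF's own default pool (which is why ixgbe_set_sriov_queues below treats "first zero bit > 1" as "extra pools in use"), and each offloaded macvlan claims one of the remaining bits, bounded by the IXGBE_MAX_MACVLANS / IXGBE_MAX_DCBMACVLANS limits defined earlier. A sketch of that bookkeeping, with hypothetical helper names:

#include "ixgbe.h"
#include <linux/bitops.h>
#include <linux/errno.h>

static int fwd_pool_alloc_sketch(struct ixgbe_adapter *adapter)
{
	/* bit 0 = PF default pool; a macvlan takes the next free bit */
	int pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);

	if (pool > IXGBE_MAX_MACVLANS)
		return -EBUSY;
	set_bit(pool, &adapter->fwd_bitmask);
	return pool;
}

static void fwd_pool_free_sketch(struct ixgbe_adapter *adapter, int pool)
{
	clear_bit(pool, &adapter->fwd_bitmask);
}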
@@ -939,4 +956,7 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
 #endif
 
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+				  struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *tx_ring);
 #endif /* _IXGBE_H_ */
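
Exposing ixgbe_xmit_frame_ring() in the header is what lets the forwarding path transmit on a specific ring instead of going through the normal netdev queue selection: the accelerated-transmit hook can look up a ring inside the macvlan's reserved pool and hand the skb straight to it. A hedged sketch of that path; the hook name and ring lookup are illustrative, not the patch's exact code in ixgbe_main.c:

#include "ixgbe.h"

static netdev_tx_t fwd_xmit_sketch(struct sk_buff *skb,
				   struct net_device *dev, void *priv)
{
	struct ixgbe_fwd_adapter *fwd = priv;
	struct ixgbe_adapter *adapter = fwd->real_adapter;
	unsigned int queue;

	/* map the skb's queue into the pool's reserved tx range */
	queue = fwd->tx_base_queue + skb_get_queue_mapping(skb);

	return ixgbe_xmit_frame_ring(skb, adapter, adapter->tx_ring[queue]);
}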
@@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
+	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
 	/* only proceed if SR-IOV is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
 	/* 64 pool mode with 2 queues per pool */
-	if ((vmdq_i > 32) || (rss_i < 4)) {
+	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 		rss_m = IXGBE_RSS_2Q_MASK;
 		rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		ring->queue_index = txr_idx;
+		if (adapter->num_rx_pools > 1)
+			ring->queue_index =
+				txr_idx % adapter->num_rx_queues_per_pool;
+		else
+			ring->queue_index = txr_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		ring->queue_index = rxr_idx;
+		if (adapter->num_rx_pools > 1)
+			ring->queue_index =
+				rxr_idx % adapter->num_rx_queues_per_pool;
+		else
+			ring->queue_index = rxr_idx;
 
 		/* assign ring to adapter */
 		adapter->rx_ring[rxr_idx] = ring;
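
The modulo in both hunks renumbers rings relative to their pool, so each offloaded macvlan sees queue indices starting at zero. A worked example, assuming 4 queues per pool (IXGBE_MAX_L2A_QUEUES):

/*
 * num_rx_queues_per_pool == 4:
 *
 *   global txr_idx/rxr_idx : 0 1 2 3 | 4 5 6 7 | 8 9 10 11 | ...
 *   pool                   : 0 0 0 0 | 1 1 1 1 | 2 2  2  2 | ...
 *   ring->queue_index      : 0 1 2 3 | 0 1 2 3 | 0 1  2  3 | ...
 *
 * i.e. queue_index = idx % num_rx_queues_per_pool whenever more than
 * one pool is active; with a single pool the old 1:1 mapping is kept.
 */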
...
@@ -223,17 +223,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Disable VMDq flag so device will be set in VM mode */
-	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+		rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	} else {
+		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
+	}
 
-	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
 	adapter->ring_feature[RING_F_RSS].limit = rss;
 
 	/* take a breather then clean up driver data */
 	msleep(100);
 
-	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 	return 0;
 }
@@ -298,13 +300,10 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 	err = ixgbe_disable_sriov(adapter);
 
 	/* Only reinit if no error and state changed */
-	if (!err && current_flags != adapter->flags) {
-		/* ixgbe_disable_sriov() doesn't clear VMDQ flag */
-		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 #ifdef CONFIG_PCI_IOV
+	if (!err && current_flags != adapter->flags)
 		ixgbe_sriov_reinit(adapter);
 #endif
-	}
 
 	return err;
 }
...