Commit f7cbdb7d authored by David S. Miller

Merge branch 'ixgbe-next'

Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to ixgbe and ixgbevf.

John adds rtnl lock/unlock semantics for ixgbe_reinit_locked(),
which was being called without the rtnl lock held.

Jacob corrects an issue where the ixgbevf_qv_disable function does
not set the disabled bit correctly.

From the community, Wei uses the proper struct type for the PCI
driver-specific data in ixgbevf_suspend() (the drvdata is a
net_device, not the adapter).

Don changes the way ring arrays are stored to allow support of
multiple queues on multiple nodes, and creates new ring
initialization functions for work previously done across multiple
functions - making the code closer to ixgbe and hopefully more readable.
He also fixes incorrect fiber EEPROM write logic.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0864c158 d3cec927
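
The central change in Don's patches is how the ring arrays are stored: instead of one kcalloc'd block of ixgbevf_ring structs, the adapter now keeps fixed-size arrays of pointers and allocates each ring separately, which is what makes per-queue, per-node placement possible. A minimal userspace sketch of the two layouts and of the LIFO unwind the new error path uses (names and sizes are illustrative, not the driver's):

```c
#include <stdlib.h>

#define MAX_QUEUES 8            /* stands in for the driver's MAX_TX_QUEUES */

struct ring {
    int count;                  /* illustrative; ixgbevf_ring holds far more */
};

/* Before: one contiguous block, one allocation for every ring:
 *     struct ring *rings = calloc(num_queues, sizeof(*rings));
 * After: an array of pointers; each ring gets its own allocation, so the
 * kernel could use kzalloc_node() to place it near the CPU that services it.
 */
static struct ring *rings[MAX_QUEUES];

static int alloc_rings(int num_queues)
{
    int i;

    for (i = 0; i < num_queues; i++) {
        rings[i] = calloc(1, sizeof(*rings[i]));
        if (!rings[i])
            goto err_allocation;
        rings[i]->count = 512;
    }
    return 0;

err_allocation:
    /* unwind in LIFO order, mirroring the driver's err_allocation label */
    while (i--) {
        free(rings[i]);
        rings[i] = NULL;
    }
    return -1;
}
```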
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
         goto out;
     }
-    eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+    eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
     status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
                                         IXGBE_I2C_EEPROM_DEV_ADDR2,
...
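The fiber fix above is a one-character change with classic read-modify-write semantics: clear the field with the inverted mask, then OR (not AND) the new value in. With `&`, the freshly cleared bits can never be set again, so the rate select value was silently dropped. A standalone illustration (the mask and values are hypothetical, not the real SFF register layout):

```c
#include <assert.h>
#include <stdint.h>

#define RS_SELECT_MASK 0x08  /* hypothetical stand-in for IXGBE_SFF_SOFT_RS_SELECT_MASK */

int main(void)
{
    uint8_t eeprom_data = 0xF7; /* field currently cleared, other bits set */
    uint8_t rs = 0x08;          /* new rate-select value to program */

    /* Buggy: the bits just cleared by ~MASK are then ANDed with rs,
     * so rs can never make it into the result. */
    assert((uint8_t)((eeprom_data & ~RS_SELECT_MASK) & rs) == 0x00);

    /* Fixed: clear the field, then OR in the new value. */
    assert((uint8_t)((eeprom_data & ~RS_SELECT_MASK) | rs) == 0xFF);
    return 0;
}
```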
@@ -6392,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
     netdev_err(adapter->netdev, "Reset adapter\n");
     adapter->tx_timeout_count++;
 
+    rtnl_lock();
     ixgbe_reinit_locked(adapter);
+    rtnl_unlock();
 }
 
 /**
...
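The `_locked` suffix on ixgbe_reinit_locked() names its precondition: the caller must already hold the RTNL, because the function brings the interface down and back up, paths the networking core otherwise serializes under rtnl_lock(). The reset subtask runs from a workqueue, where no such lock is implied, hence the explicit bracket. A self-contained userspace analogue of the pattern (a pthread mutex standing in for the RTNL):

```c
#include <pthread.h>

/* stand-in for rtnl_lock()/rtnl_unlock() */
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void reinit_locked(void)
{
    /* precondition: caller holds `rtnl` - exactly what the
     * "_locked" suffix promises but cannot enforce by itself */
}

static void reset_subtask(void)
{
    pthread_mutex_lock(&rtnl);  /* the fix: establish the precondition */
    reinit_locked();
    pthread_mutex_unlock(&rtnl);
}
```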
@@ -277,4 +277,21 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ERR_RESET_FAILED       -2
 #define IXGBE_ERR_INVALID_ARGUMENT   -3
 
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE          0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH          0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT   16         /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
 #endif /* _IXGBEVF_DEFINES_H_ */
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
     if (!netif_running(adapter->netdev)) {
         for (i = 0; i < adapter->num_tx_queues; i++)
-            adapter->tx_ring[i].count = new_tx_count;
+            adapter->tx_ring[i]->count = new_tx_count;
         for (i = 0; i < adapter->num_rx_queues; i++)
-            adapter->rx_ring[i].count = new_rx_count;
+            adapter->rx_ring[i]->count = new_rx_count;
         adapter->tx_ring_count = new_tx_count;
         adapter->rx_ring_count = new_rx_count;
         goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
         /* clone ring and setup updated count */
-        tx_ring[i] = adapter->tx_ring[i];
+        tx_ring[i] = *adapter->tx_ring[i];
         tx_ring[i].count = new_tx_count;
         err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
         if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
         /* clone ring and setup updated count */
-        rx_ring[i] = adapter->rx_ring[i];
+        rx_ring[i] = *adapter->rx_ring[i];
         rx_ring[i].count = new_rx_count;
         err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
         if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Tx */
     if (tx_ring) {
         for (i = 0; i < adapter->num_tx_queues; i++) {
-            ixgbevf_free_tx_resources(adapter,
-                                      &adapter->tx_ring[i]);
-            adapter->tx_ring[i] = tx_ring[i];
+            ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+            *adapter->tx_ring[i] = tx_ring[i];
         }
 
         adapter->tx_ring_count = new_tx_count;
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Rx */
     if (rx_ring) {
         for (i = 0; i < adapter->num_rx_queues; i++) {
-            ixgbevf_free_rx_resources(adapter,
-                                      &adapter->rx_ring[i]);
-            adapter->rx_ring[i] = rx_ring[i];
+            ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+            *adapter->rx_ring[i] = rx_ring[i];
         }
 
         adapter->rx_ring_count = new_rx_count;
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                 tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        rx_yields += adapter->rx_ring[i].bp_yields;
-        rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-        rx_yields += adapter->rx_ring[i].bp_yields;
+        rx_yields += adapter->rx_ring[i]->bp_yields;
+        rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
+        rx_yields += adapter->rx_ring[i]->bp_yields;
     }
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        tx_yields += adapter->tx_ring[i].bp_yields;
-        tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-        tx_yields += adapter->tx_ring[i].bp_yields;
+        tx_yields += adapter->tx_ring[i]->bp_yields;
+        tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
+        tx_yields += adapter->tx_ring[i]->bp_yields;
     }
 
     adapter->bp_rx_yields = rx_yields;
...
@@ -260,6 +260,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
     spin_lock_bh(&q_vector->lock);
     if (q_vector->state & IXGBEVF_QV_OWNED)
         rc = false;
+    q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
     spin_unlock_bh(&q_vector->lock);
     return rc;
 }
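The single added line is the entire fix: previously the function reported failure when the vector was busy but never recorded that a disable had been requested, so the owner had no way to notice the request when it later dropped the vector. A self-contained analogue of the corrected logic (pthread mutex in place of the bh spinlock; only the two relevant state flags are modeled):

```c
#include <pthread.h>
#include <stdbool.h>

#define QV_OWNED          0x1   /* NAPI or busy-poll currently owns the vector */
#define QV_STATE_DISABLED 0x2   /* a disable has been requested */

struct q_vector {
    pthread_mutex_t lock;
    unsigned int state;
};

/* Report failure if the vector is busy, but *always* latch the DISABLED
 * request so the current owner sees it once it releases ownership;
 * before the fix this latch was missing. */
static bool qv_disable(struct q_vector *qv)
{
    bool rc = true;

    pthread_mutex_lock(&qv->lock);
    if (qv->state & QV_OWNED)
        rc = false;
    qv->state |= QV_STATE_DISABLED;     /* the one-line fix */
    pthread_mutex_unlock(&qv->lock);
    return rc;
}
```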
@@ -326,7 +327,7 @@ struct ixgbevf_adapter {
     u32 eims_other;
 
     /* TX */
-    struct ixgbevf_ring *tx_ring;    /* One per active queue */
+    struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_tx_queues;
     u64 restart_queue;
     u64 hw_csum_tx_good;
@@ -336,7 +337,7 @@ struct ixgbevf_adapter {
     u32 tx_timeout_count;
 
     /* RX */
-    struct ixgbevf_ring *rx_ring;    /* One per active queue */
+    struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_rx_queues;
     u64 hw_csum_rx_error;
     u64 hw_rx_no_dma_resources;
...
@@ -848,8 +848,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
     struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-    a->rx_ring[r_idx].next = q_vector->rx.ring;
-    q_vector->rx.ring = &a->rx_ring[r_idx];
+    a->rx_ring[r_idx]->next = q_vector->rx.ring;
+    q_vector->rx.ring = a->rx_ring[r_idx];
     q_vector->rx.count++;
 }
 
@@ -858,8 +858,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
     struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-    a->tx_ring[t_idx].next = q_vector->tx.ring;
-    q_vector->tx.ring = &a->tx_ring[t_idx];
+    a->tx_ring[t_idx]->next = q_vector->tx.ring;
+    q_vector->tx.ring = a->tx_ring[t_idx];
     q_vector->tx.count++;
 }
@@ -1087,39 +1087,82 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
 }
 
 /**
- * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
  * @adapter: board private structure
+ * @ring: structure containing ring specific data
  *
- * Configure the Tx unit of the MAC after a reset.
+ * Configure the Tx descriptor ring after a reset.
  **/
-static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+                                      struct ixgbevf_ring *ring)
 {
-    u64 tdba;
     struct ixgbe_hw *hw = &adapter->hw;
-    u32 i, j, tdlen, txctrl;
-
-    /* Setup the HW Tx Head and Tail descriptor pointers */
-    for (i = 0; i < adapter->num_tx_queues; i++) {
-        struct ixgbevf_ring *ring = &adapter->tx_ring[i];
-        j = ring->reg_idx;
-        tdba = ring->dma;
-        tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
-        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
-                        (tdba & DMA_BIT_MASK(32)));
-        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
-        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
-        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
-        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-        ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
-        ring->next_to_clean = 0;
-        ring->next_to_use = 0;
-        /* Disable Tx Head Writeback RO bit, since this hoses
-         * bookkeeping if things aren't delivered in order.
-         */
-        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
-        txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
-    }
+    u64 tdba = ring->dma;
+    int wait_loop = 10;
+    u32 txdctl = IXGBE_TXDCTL_ENABLE;
+    u8 reg_idx = ring->reg_idx;
+
+    /* disable queue to avoid issues while updating state */
+    IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+    IXGBE_WRITE_FLUSH(hw);
+
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+                    ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+    /* disable head writeback */
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+    /* enable relaxed ordering */
+    IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+                    (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+                     IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+    /* reset head and tail pointers */
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+    IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+    ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+    /* reset ntu and ntc to place SW in sync with hardware */
+    ring->next_to_clean = 0;
+    ring->next_to_use = 0;
+
+    /* In order to avoid issues WTHRESH + PTHRESH should always be equal
+     * to or less than the number of on chip descriptors, which is
+     * currently 40.
+     */
+    txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+    /* Setting PTHRESH to 32 both improves performance */
+    txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+               32;          /* PTHRESH = 32 */
+
+    IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+    /* poll to verify queue is enabled */
+    do {
+        usleep_range(1000, 2000);
+        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+    } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+    if (!wait_loop)
+        pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+    u32 i;
+
+    /* Setup the HW Tx Head and Tail descriptor pointers */
+    for (i = 0; i < adapter->num_tx_queues; i++)
+        ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
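The TXDCTL value that ixgbevf_configure_tx_ring() writes packs the ENABLE bit plus three thresholds into a single 32-bit register: PTHRESH in the low bits, HTHRESH at bit 8, and WTHRESH at bit 16 (IXGBE_TXDCTL_WTHRESH_SHIFT above). A standalone arithmetic check of the value the function builds; the 7-bit field widths are an assumption taken from the 82599 datasheet:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t txdctl = 0x02000000;   /* IXGBE_TXDCTL_ENABLE */

    txdctl |= (8 << 16);            /* WTHRESH = 8: burst descriptor writeback */
    txdctl |= (1 << 8) | 32;        /* HTHRESH = 1, PTHRESH = 32 */

    /* WTHRESH + PTHRESH == 40, the on-chip descriptor count that the
     * comment in ixgbevf_configure_tx_ring() names as the upper bound */
    assert(((txdctl >> 16) & 0x7f) + (txdctl & 0x7f) == 40);
    assert(txdctl == 0x02080120);
    return 0;
}
```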
@@ -1130,7 +1173,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
     struct ixgbe_hw *hw = &adapter->hw;
     u32 srrctl;
 
-    rx_ring = &adapter->rx_ring[index];
+    rx_ring = adapter->rx_ring[index];
 
     srrctl = IXGBE_SRRCTL_DROP_EN;
@@ -1188,7 +1231,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
         rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
-        adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+        adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+                                     struct ixgbevf_ring *ring)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+    int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+    u32 rxdctl;
+    u8 reg_idx = ring->reg_idx;
+
+    rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+    rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+    /* write value back with RXDCTL.ENABLE bit cleared */
+    IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+    /* the hardware may take up to 100us to really disable the rx queue */
+    do {
+        udelay(10);
+        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+    } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+    if (!wait_loop)
+        pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+               reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+                                         struct ixgbevf_ring *ring)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+    int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+    u32 rxdctl;
+    u8 reg_idx = ring->reg_idx;
+
+    do {
+        usleep_range(1000, 2000);
+        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+    } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+    if (!wait_loop)
+        pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+               reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+                                      struct ixgbevf_ring *ring)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+    u64 rdba = ring->dma;
+    u32 rxdctl;
+    u8 reg_idx = ring->reg_idx;
+
+    /* disable queue to avoid issues while updating state */
+    rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+    ixgbevf_disable_rx_queue(adapter, ring);
+
+    IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+    IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+    IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+                    ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+    /* enable relaxed ordering */
+    IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+                    IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+    /* reset head and tail pointers */
+    IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+    IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+    ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+    /* reset ntu and ntc to place SW in sync with hardware */
+    ring->next_to_clean = 0;
+    ring->next_to_use = 0;
+
+    ixgbevf_configure_srrctl(adapter, reg_idx);
+
+    /* prevent DMA from exceeding buffer space available */
+    rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+    rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+    rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+    IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+    ixgbevf_rx_desc_queue_enable(adapter, ring);
+    ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
 }
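Both new Rx helpers use the same bounded-poll idiom: write the register, then re-read it until the hardware acknowledges the state change or a fixed loop budget expires, logging an error rather than hanging. A generic sketch of that pattern (read_reg() is a hypothetical accessor standing in for IXGBE_READ_REG(); the sleep calls are elided):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_POLL 10     /* mirrors IXGBEVF_MAX_RX_DESC_POLL */

/* hypothetical register accessor standing in for IXGBE_READ_REG() */
extern uint32_t read_reg(unsigned int reg);

/* Poll until (reg & mask) reaches the wanted state or the budget runs out;
 * the shape of ixgbevf_rx_desc_queue_enable()/ixgbevf_disable_rx_queue(). */
static bool poll_reg_bit(unsigned int reg, uint32_t mask, bool want_set)
{
    int wait_loop = MAX_POLL;
    uint32_t val;

    do {
        /* the driver sleeps here: usleep_range(1000, 2000) on enable,
         * udelay(10) on disable */
        val = read_reg(reg);
    } while (--wait_loop && (!!(val & mask)) != want_set);

    if (!wait_loop)
        fprintf(stderr, "reg %#x: bit never %s while polling\n",
                reg, want_set ? "set" : "cleared");
    return wait_loop != 0;
}
```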
@@ -1199,10 +1328,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 **/
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
-    u64 rdba;
-    struct ixgbe_hw *hw = &adapter->hw;
-    int i, j;
-    u32 rdlen;
+    int i;
 
     ixgbevf_setup_psrtype(adapter);
 
@@ -1211,23 +1337,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
     /* Setup the HW Rx Head and Tail Descriptor Pointers and
      * the Base and Length of the Rx Descriptor Ring */
-    for (i = 0; i < adapter->num_rx_queues; i++) {
-        struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-        rdba = ring->dma;
-        j = ring->reg_idx;
-        rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
-        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
-                        (rdba & DMA_BIT_MASK(32)));
-        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
-        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
-        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
-        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-        ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
-        ring->next_to_clean = 0;
-        ring->next_to_use = 0;
-        ixgbevf_configure_srrctl(adapter, j);
-    }
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1389,7 +1500,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
     if (num_tcs > 1) {
         /* update default Tx ring register index */
-        adapter->tx_ring[0].reg_idx = def_q;
+        adapter->tx_ring[0]->reg_idx = def_q;
 
         /* we need as many queues as traffic classes */
         num_rx_queues = num_tcs;
@@ -1409,69 +1520,14 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
-    struct net_device *netdev = adapter->netdev;
-    int i;
-
     ixgbevf_configure_dcb(adapter);
 
-    ixgbevf_set_rx_mode(netdev);
+    ixgbevf_set_rx_mode(adapter->netdev);
 
     ixgbevf_restore_vlan(adapter);
 
     ixgbevf_configure_tx(adapter);
     ixgbevf_configure_rx(adapter);
-
-    for (i = 0; i < adapter->num_rx_queues; i++) {
-        struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-        ixgbevf_alloc_rx_buffers(adapter, ring,
-                                 ixgbevf_desc_unused(ring));
-    }
-}
-
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-                                         int rxr)
-{
-    struct ixgbe_hw *hw = &adapter->hw;
-    int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-    u32 rxdctl;
-    int j = adapter->rx_ring[rxr].reg_idx;
-
-    do {
-        usleep_range(1000, 2000);
-        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-    } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-
-    if (!wait_loop)
-        hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-               rxr);
-
-    ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
-                            (adapter->rx_ring[rxr].count - 1));
-}
-
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
-                                     struct ixgbevf_ring *ring)
-{
-    struct ixgbe_hw *hw = &adapter->hw;
-    int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-    u32 rxdctl;
-    u8 reg_idx = ring->reg_idx;
-
-    rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-    rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
-    /* write value back with RXDCTL.ENABLE bit cleared */
-    IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
-
-    /* the hardware may take up to 100us to really disable the rx queue */
-    do {
-        udelay(10);
-        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-    } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
-    if (!wait_loop)
-        hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
-               reg_idx);
-}
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1536,37 +1592,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
     struct ixgbe_hw *hw = &adapter->hw;
-    int i, j = 0;
-    int num_rx_rings = adapter->num_rx_queues;
-    u32 txdctl, rxdctl;
-
-    for (i = 0; i < adapter->num_tx_queues; i++) {
-        j = adapter->tx_ring[i].reg_idx;
-        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-        /* enable WTHRESH=8 descriptors, to encourage burst writeback */
-        txdctl |= (8 << 16);
-        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-    }
-
-    for (i = 0; i < adapter->num_tx_queues; i++) {
-        j = adapter->tx_ring[i].reg_idx;
-        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-        txdctl |= IXGBE_TXDCTL_ENABLE;
-        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-    }
-
-    for (i = 0; i < num_rx_rings; i++) {
-        j = adapter->rx_ring[i].reg_idx;
-        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
-        if (hw->mac.type == ixgbe_mac_X540_vf) {
-            rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-            rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
-                       IXGBE_RXDCTL_RLPML_EN);
-        }
-        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-        ixgbevf_rx_desc_queue_enable(adapter, i);
-    }
 
     ixgbevf_configure_msix(adapter);
@@ -1686,7 +1711,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
-        ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+        ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -1698,22 +1723,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_tx_queues; i++)
-        ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+        ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
     struct ixgbe_hw *hw = &adapter->hw;
-    u32 txdctl;
-    int i, j;
+    int i;
 
     /* signal that we are down to the interrupt handler */
     set_bit(__IXGBEVF_DOWN, &adapter->state);
 
     /* disable all enabled rx queues */
     for (i = 0; i < adapter->num_rx_queues; i++)
-        ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+        ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
     netif_tx_disable(netdev);
 
@@ -1734,10 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
     /* disable transmits in the hardware now that interrupts are off */
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        j = adapter->tx_ring[i].reg_idx;
-        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
-                        (txdctl & ~IXGBE_TXDCTL_ENABLE));
+        u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+                        IXGBE_TXDCTL_SWFLSH);
     }
 
     netif_carrier_off(netdev);
@@ -1875,40 +1899,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-    int i;
-
-    adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                               sizeof(struct ixgbevf_ring), GFP_KERNEL);
-    if (!adapter->tx_ring)
-        goto err_tx_ring_allocation;
-
-    adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                               sizeof(struct ixgbevf_ring), GFP_KERNEL);
-    if (!adapter->rx_ring)
-        goto err_rx_ring_allocation;
-
-    for (i = 0; i < adapter->num_tx_queues; i++) {
-        adapter->tx_ring[i].count = adapter->tx_ring_count;
-        adapter->tx_ring[i].queue_index = i;
-        /* reg_idx may be remapped later by DCB config */
-        adapter->tx_ring[i].reg_idx = i;
-        adapter->tx_ring[i].dev = &adapter->pdev->dev;
-        adapter->tx_ring[i].netdev = adapter->netdev;
-    }
-
-    for (i = 0; i < adapter->num_rx_queues; i++) {
-        adapter->rx_ring[i].count = adapter->rx_ring_count;
-        adapter->rx_ring[i].queue_index = i;
-        adapter->rx_ring[i].reg_idx = i;
-        adapter->rx_ring[i].dev = &adapter->pdev->dev;
-        adapter->rx_ring[i].netdev = adapter->netdev;
-    }
+    struct ixgbevf_ring *ring;
+    int rx = 0, tx = 0;
+
+    for (; tx < adapter->num_tx_queues; tx++) {
+        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+        if (!ring)
+            goto err_allocation;
+
+        ring->dev = &adapter->pdev->dev;
+        ring->netdev = adapter->netdev;
+        ring->count = adapter->tx_ring_count;
+        ring->queue_index = tx;
+        ring->reg_idx = tx;
+
+        adapter->tx_ring[tx] = ring;
+    }
+
+    for (; rx < adapter->num_rx_queues; rx++) {
+        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+        if (!ring)
+            goto err_allocation;
+
+        ring->dev = &adapter->pdev->dev;
+        ring->netdev = adapter->netdev;
+        ring->count = adapter->rx_ring_count;
+        ring->queue_index = rx;
+        ring->reg_idx = rx;
+
+        adapter->rx_ring[rx] = ring;
+    }
 
     return 0;
 
-err_rx_ring_allocation:
-    kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+    while (tx) {
+        kfree(adapter->tx_ring[--tx]);
+        adapter->tx_ring[tx] = NULL;
+    }
+
+    while (rx) {
+        kfree(adapter->rx_ring[--rx]);
+        adapter->rx_ring[rx] = NULL;
+    }
     return -ENOMEM;
 }
@@ -2099,6 +2133,17 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+    int i;
+
+    for (i = 0; i < adapter->num_tx_queues; i++) {
+        kfree(adapter->tx_ring[i]);
+        adapter->tx_ring[i] = NULL;
+    }
+    for (i = 0; i < adapter->num_rx_queues; i++) {
+        kfree(adapter->rx_ring[i]);
+        adapter->rx_ring[i] = NULL;
+    }
+
     adapter->num_tx_queues = 0;
     adapter->num_rx_queues = 0;
@@ -2229,11 +2274,11 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
         adapter->hw_csum_rx_error +=
-            adapter->rx_ring[i].hw_csum_rx_error;
+            adapter->rx_ring[i]->hw_csum_rx_error;
         adapter->hw_csum_rx_good +=
-            adapter->rx_ring[i].hw_csum_rx_good;
-        adapter->rx_ring[i].hw_csum_rx_error = 0;
-        adapter->rx_ring[i].hw_csum_rx_good = 0;
+            adapter->rx_ring[i]->hw_csum_rx_good;
+        adapter->rx_ring[i]->hw_csum_rx_error = 0;
+        adapter->rx_ring[i]->hw_csum_rx_good = 0;
     }
 }
 
@@ -2396,6 +2441,10 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
     vfree(tx_ring->tx_buffer_info);
     tx_ring->tx_buffer_info = NULL;
 
+    /* if not set, then don't free */
+    if (!tx_ring->desc)
+        return;
+
     dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
                       tx_ring->dma);
@@ -2413,10 +2462,8 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_tx_queues; i++)
-        if (adapter->tx_ring[i].desc)
-            ixgbevf_free_tx_resources(adapter,
-                                      &adapter->tx_ring[i]);
+        if (adapter->tx_ring[i]->desc)
+            ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2471,7 +2518,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
     int i, err = 0;
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+        err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
         if (!err)
             continue;
         hw_dbg(&adapter->hw,
@@ -2533,7 +2580,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
     int i, err = 0;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+        err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
         if (!err)
             continue;
         hw_dbg(&adapter->hw,
@@ -2577,9 +2624,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
     int i;
 
     for (i = 0; i < adapter->num_rx_queues; i++)
-        if (adapter->rx_ring[i].desc)
-            ixgbevf_free_rx_resources(adapter,
-                                      &adapter->rx_ring[i]);
+        if (adapter->rx_ring[i]->desc)
+            ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
@@ -3069,7 +3115,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         return NETDEV_TX_OK;
     }
 
-    tx_ring = &adapter->tx_ring[r_idx];
+    tx_ring = adapter->tx_ring[r_idx];
 
     /*
      * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3222,8 +3268,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 #ifdef CONFIG_PM
 static int ixgbevf_resume(struct pci_dev *pdev)
 {
-    struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
-    struct net_device *netdev = adapter->netdev;
+    struct net_device *netdev = pci_get_drvdata(pdev);
+    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
     u32 err;
 
     pci_set_power_state(pdev, PCI_D0);
@@ -3282,7 +3328,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
     stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        ring = &adapter->rx_ring[i];
+        ring = adapter->rx_ring[i];
         do {
             start = u64_stats_fetch_begin_bh(&ring->syncp);
             bytes = ring->total_bytes;
@@ -3293,7 +3339,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
     }
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        ring = &adapter->tx_ring[i];
+        ring = adapter->tx_ring[i];
         do {
             start = u64_stats_fetch_begin_bh(&ring->syncp);
             bytes = ring->total_bytes;
@@ -3528,9 +3574,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
     hw_dbg(&adapter->hw, "Remove complete\n");
 
-    kfree(adapter->tx_ring);
-    kfree(adapter->rx_ring);
-
     free_netdev(netdev);
 
     pci_disable_device(pdev);
...