Commit 2d39d576 authored by Yi Zou, committed by David S. Miller

ixgbe: make sure per Rx queue is disabled before unmapping the receive buffer

When disabling the Rx logic globally, we also want to disable the per-queue Rx
logic through the per-queue Rx control register RXDCTL, so that no more DMA
happens from the packet buffer into the receive buffers associated with the Rx
ring before we start unmapping those receive buffers. The hardware may take up
to 100us before the corresponding Rx queue is really disabled. Add
ixgbe_disable_rx_queue() for this purpose.
Signed-off-by: Yi Zou <yi.zou@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5377a416
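
Before the diff, a minimal self-contained sketch (not the driver code) of the pattern the new ixgbe_disable_rx_queue() helper follows: clear the queue's enable bit, then poll the control register until the hardware confirms the queue is off or a roughly 100us budget (10 polls of 10us) runs out. The helpers below (reg_read, reg_write, delay_us) and the fake_rxdctl backing store are illustrative stand-ins, not driver APIs; in the driver the equivalents are IXGBE_READ_REG()/IXGBE_WRITE_REG(), udelay(), IXGBE_RXDCTL_ENABLE and IXGBE_MAX_RX_DESC_POLL.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_ENABLE_BIT (1u << 25) /* stand-in for IXGBE_RXDCTL_ENABLE */
#define MAX_POLL_LOOPS   10         /* 10 polls x 10us = 100us worst case */

/* Fake register backing store so the sketch runs without hardware;
 * the driver reads/writes RXDCTL through IXGBE_READ_REG/IXGBE_WRITE_REG. */
static uint32_t fake_rxdctl = QUEUE_ENABLE_BIT;

static uint32_t reg_read(void)        { return fake_rxdctl; }
static void reg_write(uint32_t val)   { fake_rxdctl = val; }
static void delay_us(unsigned int us) { (void)us; /* stands in for udelay() */ }

/* Clear the enable bit, then poll until the hardware reports the queue
 * disabled or the polling budget runs out. */
static bool disable_queue_and_wait(void)
{
	uint32_t ctrl = reg_read();
	int loops = MAX_POLL_LOOPS;

	reg_write(ctrl & ~QUEUE_ENABLE_BIT);

	do {
		delay_us(10);
		ctrl = reg_read();
	} while (--loops && (ctrl & QUEUE_ENABLE_BIT));

	return loops != 0;
}

int main(void)
{
	printf("queue disabled: %s\n", disable_queue_and_wait() ? "yes" : "no");
	return 0;
}

The same bounded-poll shape appears in the ixgbe_disable_rx_queue() hunk of the diff below.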
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
...
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);

 	/* now Tx */
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
...
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 	}
 }

+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,

 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);

 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	/* disable receives */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-	IXGBE_WRITE_FLUSH(hw);
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
 	msleep(10);

 	netif_tx_stop_all_queues(netdev);
...