Commit efe3d3c8 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: convert rings from q_vector bit indexed array to linked list

This change converts the current bit array into a linked list so that the
q_vectors can simply go through ring by ring and locate each ring needing
to be cleaned.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 30065e63
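In miniature, the pattern the patch introduces looks like the sketch below (not part of the commit): each ring gains a next pointer, the map_vector_to_rxq()/map_vector_to_txq() helpers prepend a ring onto its q_vector's per-direction list, and the cleanup paths walk that list instead of scanning a bitmap with find_first_bit()/find_next_bit(). The type and function names here are simplified stand-ins, not the real ixgbe definitions.

/*
 * Simplified stand-in types; the real driver uses struct ixgbe_ring and
 * struct ixgbe_ring_container.
 */
#include <stddef.h>
#include <stdio.h>

struct ring {
        struct ring *next;      /* next ring owned by the same vector */
        int reg_idx;
};

struct ring_container {
        struct ring *head;      /* head of the vector's ring list */
        unsigned int count;     /* number of rings on the list */
};

/* mirrors map_vector_to_rxq()/map_vector_to_txq(): prepend the ring */
static void container_add_ring(struct ring_container *rc, struct ring *r)
{
        r->next = rc->head;
        rc->head = r;
        rc->count++;
}

/* mirrors the new cleanup loops: walk the list instead of a bitmap */
static void container_service_rings(const struct ring_container *rc)
{
        const struct ring *r;

        for (r = rc->head; r != NULL; r = r->next)
                printf("servicing ring %d\n", r->reg_idx);
}

int main(void)
{
        struct ring rings[3] = { { NULL, 0 }, { NULL, 1 }, { NULL, 2 } };
        struct ring_container rx = { NULL, 0 };
        int i;

        for (i = 0; i < 3; i++)
                container_add_ring(&rx, &rings[i]);

        /* rings come back in reverse order of insertion: 2, 1, 0 */
        container_service_rings(&rx);
        return 0;
}

Because rings are prepended, a vector walks its rings in the reverse of the order in which they were mapped; the count field is kept because paths such as the budget split in ixgbe_clean_rxtx_many() only need the number of rings, not their identities.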
@@ -209,6 +209,7 @@ enum ixbge_ring_state_t {
 #define clear_ring_rsc_enabled(ring) \
         clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
+        struct ixgbe_ring *next;        /* pointer to next ring in q_vector */
         void *desc;                     /* descriptor ring memory */
         struct device *dev;             /* device for DMA mapping */
         struct net_device *netdev;      /* netdev ring belongs to */
@@ -277,11 +278,7 @@ struct ixgbe_ring_feature {
 } ____cacheline_internodealigned_in_smp;

 struct ixgbe_ring_container {
-#if MAX_RX_QUEUES > MAX_TX_QUEUES
-        DECLARE_BITMAP(idx, MAX_RX_QUEUES);
-#else
-        DECLARE_BITMAP(idx, MAX_TX_QUEUES);
-#endif
+        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
         unsigned int total_bytes;       /* total bytes processed this int */
         unsigned int total_packets;     /* total packets processed this int */
         u16 work_limit;                 /* total work allowed per interrupt */
@@ -974,26 +974,17 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 {
         struct ixgbe_adapter *adapter = q_vector->adapter;
+        struct ixgbe_ring *ring;
         int cpu = get_cpu();
-        long r_idx;
-        int i;

         if (q_vector->cpu == cpu)
                 goto out_no_update;

-        r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-        for (i = 0; i < q_vector->tx.count; i++) {
-                ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
-                r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                      r_idx + 1);
-        }
+        for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+                ixgbe_update_tx_dca(adapter, ring, cpu);

-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        for (i = 0; i < q_vector->rx.count; i++) {
-                ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
-                r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                      r_idx + 1);
-        }
+        for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+                ixgbe_update_rx_dca(adapter, ring, cpu);

         q_vector->cpu = cpu;
 out_no_update:
@@ -1546,7 +1537,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
         struct ixgbe_q_vector *q_vector;
-        int i, q_vectors, v_idx, r_idx;
+        int q_vectors, v_idx;
         u32 mask;

         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1556,33 +1547,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
          * corresponding register.
          */
         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+                struct ixgbe_ring *ring;
                 q_vector = adapter->q_vector[v_idx];

-                /* XXX for_each_set_bit(...) */
-                r_idx = find_first_bit(q_vector->rx.idx,
-                                       adapter->num_rx_queues);
-                for (i = 0; i < q_vector->rx.count; i++) {
-                        u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
-                        ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
-                        r_idx = find_next_bit(q_vector->rx.idx,
-                                              adapter->num_rx_queues,
-                                              r_idx + 1);
-                }
-                r_idx = find_first_bit(q_vector->tx.idx,
-                                       adapter->num_tx_queues);
-                for (i = 0; i < q_vector->tx.count; i++) {
-                        u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
-                        ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
-                        r_idx = find_next_bit(q_vector->tx.idx,
-                                              adapter->num_tx_queues,
-                                              r_idx + 1);
-                }
-                if (q_vector->tx.count && !q_vector->rx.count)
+                for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+                        ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+                for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+                        ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+                if (q_vector->tx.ring && !q_vector->rx.ring)
                         /* tx only */
                         q_vector->eitr = adapter->tx_eitr_param;
-                else if (q_vector->rx.count)
+                else if (q_vector->rx.ring)
                         /* rx or mixed */
                         q_vector->eitr = adapter->rx_eitr_param;
@@ -2006,20 +1983,10 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
         struct ixgbe_q_vector *q_vector = data;
-        struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *tx_ring;
-        int i, r_idx;

         if (!q_vector->tx.count)
                 return IRQ_HANDLED;

-        r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-        for (i = 0; i < q_vector->tx.count; i++) {
-                tx_ring = adapter->tx_ring[r_idx];
-                r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                      r_idx + 1);
-        }
-
         /* EIAM disabled interrupts (on this vector) for us */
         napi_schedule(&q_vector->napi);
@@ -2034,22 +2001,6 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 {
         struct ixgbe_q_vector *q_vector = data;
-        struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *rx_ring;
-        int r_idx;
-        int i;
-
-#ifdef CONFIG_IXGBE_DCA
-        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                ixgbe_update_dca(q_vector);
-#endif
-
-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        for (i = 0; i < q_vector->rx.count; i++) {
-                rx_ring = adapter->rx_ring[r_idx];
-                r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                      r_idx + 1);
-        }

         if (!q_vector->rx.count)
                 return IRQ_HANDLED;
@@ -2063,28 +2014,10 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 {
         struct ixgbe_q_vector *q_vector = data;
-        struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *ring;
-        int r_idx;
-        int i;

         if (!q_vector->tx.count && !q_vector->rx.count)
                 return IRQ_HANDLED;

-        r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-        for (i = 0; i < q_vector->tx.count; i++) {
-                ring = adapter->tx_ring[r_idx];
-                r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                      r_idx + 1);
-        }
-
-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        for (i = 0; i < q_vector->rx.count; i++) {
-                ring = adapter->rx_ring[r_idx];
-                r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                      r_idx + 1);
-        }
-
         /* EIAM disabled interrupts (on this vector) for us */
         napi_schedule(&q_vector->napi);
@@ -2104,19 +2037,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
         struct ixgbe_q_vector *q_vector =
                 container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *rx_ring = NULL;
         int work_done = 0;
-        long r_idx;

 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                 ixgbe_update_dca(q_vector);
 #endif

-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        rx_ring = adapter->rx_ring[r_idx];
-        ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+        ixgbe_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);

         /* If all Rx work done, exit the polling mode */
         if (work_done < budget) {
@@ -2144,38 +2072,29 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
         struct ixgbe_q_vector *q_vector =
                 container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *ring = NULL;
-        int work_done = 0, i;
-        long r_idx;
-        bool tx_clean_complete = true;
+        struct ixgbe_ring *ring;
+        int work_done = 0;
+        bool clean_complete = true;

 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                 ixgbe_update_dca(q_vector);
 #endif

-        r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-        for (i = 0; i < q_vector->tx.count; i++) {
-                ring = adapter->tx_ring[r_idx];
-                tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
-                r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                      r_idx + 1);
-        }
+        for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+                clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);

         /* attempt to distribute budget to each queue fairly, but don't allow
          * the budget to go below 1 because we'll exit polling */
         budget /= (q_vector->rx.count ?: 1);
         budget = max(budget, 1);
-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        for (i = 0; i < q_vector->rx.count; i++) {
-                ring = adapter->rx_ring[r_idx];
+
+        for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
                 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
-                r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                      r_idx + 1);
-        }

-        r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-        ring = adapter->rx_ring[r_idx];
+        if (!clean_complete)
+                work_done = budget;
+
         /* If all Rx work done, exit the polling mode */
         if (work_done < budget) {
                 napi_complete(napi);
@@ -2203,32 +2122,23 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
         struct ixgbe_q_vector *q_vector =
                 container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *tx_ring = NULL;
-        int work_done = 0;
-        long r_idx;

 #ifdef CONFIG_IXGBE_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                 ixgbe_update_dca(q_vector);
 #endif

-        r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-        tx_ring = adapter->tx_ring[r_idx];
-
-        if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
-                work_done = budget;
+        if (!ixgbe_clean_tx_irq(q_vector, q_vector->tx.ring))
+                return budget;

         /* If all Tx work done, exit the polling mode */
-        if (work_done < budget) {
-                napi_complete(napi);
-                if (adapter->tx_itr_setting & 1)
-                        ixgbe_set_itr(q_vector);
-                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                        ixgbe_irq_enable_queues(adapter,
-                                                ((u64)1 << q_vector->v_idx));
-        }
+        napi_complete(napi);
+        if (adapter->tx_itr_setting & 1)
+                ixgbe_set_itr(q_vector);
+        if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

-        return work_done;
+        return 0;
 }

 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
@@ -2237,9 +2147,10 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
         struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];

-        set_bit(r_idx, q_vector->rx.idx);
-        q_vector->rx.count++;
         rx_ring->q_vector = q_vector;
+        rx_ring->next = q_vector->rx.ring;
+        q_vector->rx.ring = rx_ring;
+        q_vector->rx.count++;
 }

 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -2248,9 +2159,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
         struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];

-        set_bit(t_idx, q_vector->tx.idx);
-        q_vector->tx.count++;
         tx_ring->q_vector = q_vector;
+        tx_ring->next = q_vector->tx.ring;
+        q_vector->tx.ring = tx_ring;
+        q_vector->tx.count++;
         q_vector->tx.work_limit = a->tx_work_limit;
 }
@@ -2508,14 +2420,26 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 {
-        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+        int i;
+
+        /* legacy and MSI only use one vector */
+        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+                q_vectors = 1;
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                adapter->rx_ring[i]->q_vector = NULL;
+                adapter->rx_ring[i]->next = NULL;
+        }
+
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                adapter->tx_ring[i]->q_vector = NULL;
+                adapter->tx_ring[i]->next = NULL;
+        }

         for (i = 0; i < q_vectors; i++) {
                 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-                bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
-                bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
-                q_vector->rx.count = 0;
-                q_vector->tx.count = 0;
+                memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
+                memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
         }
 }
@@ -5923,7 +5847,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                 /* get one bit for every active tx/rx interrupt vector */
                 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                         struct ixgbe_q_vector *qv = adapter->q_vector[i];
-                        if (qv->rx.count || qv->tx.count)
+                        if (qv->rx.ring || qv->tx.ring)
                                 eics |= ((u64)1 << i);
                 }
         }
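One non-obvious consequence of putting list pointers in the rings themselves shows up in the ixgbe_reset_q_vectors() hunk above: before rings are mapped to vectors again, every ring's next and q_vector pointers are cleared and each ring container is zeroed, presumably so a stale next pointer cannot chain a ring into a list it no longer belongs to. A small sketch of that teardown step, again with simplified stand-in types rather than the driver's own:

#include <stddef.h>
#include <string.h>

struct ring {
        struct ring *next;      /* next ring in the owning vector's list */
        void *q_vector;         /* owning vector, kept opaque here */
};

struct ring_container {
        struct ring *head;      /* head of the vector's ring list */
        unsigned int count;
};

/* clear stale list state before rings are mapped to vectors again */
static void reset_ring_lists(struct ring *rings, int nr_rings,
                             struct ring_container *containers, int nr_containers)
{
        int i;

        for (i = 0; i < nr_rings; i++) {
                rings[i].q_vector = NULL;
                rings[i].next = NULL;
        }

        /* zeroing the container drops both the list head and the count */
        for (i = 0; i < nr_containers; i++)
                memset(&containers[i], 0, sizeof(containers[i]));
}

int main(void)
{
        struct ring rings[4];
        struct ring_container containers[2];

        reset_ring_lists(rings, 4, containers, 2);
        return 0;
}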