Commit e31230f9 authored by Jakub Kicinski, committed by David S. Miller

nfp: add helper to reassign rings to IRQ vectors

Instead of fixing up ring -> vector relations in the ring swap functions,
put the reassignment into a helper function which will reinit all links.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0668b60b
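For orientation before the diff, here is a minimal, self-contained sketch of the pattern the patch introduces: one helper assigns (or NULLs) a vector's RX/TX ring pointers by index, and callers loop over all vectors to rebuild every ring -> vector link after a swap. The struct layouts and names below are simplified placeholders for illustration only; just the helper and loop logic mirror the new nfp_net_vector_assign_rings() shown in the diff.

#include <stddef.h>

/* Simplified stand-ins for the driver structures (assumption: the real
 * nfp_net and nfp_net_r_vector carry many more fields). */
struct ring { unsigned int idx; };

struct r_vector {
        struct ring *rx_ring;   /* NULL when this vector has no RX ring */
        struct ring *tx_ring;   /* NULL when this vector has no TX ring */
};

struct net_sketch {
        unsigned int num_rx_rings, num_tx_rings, max_r_vecs;
        struct ring *rx_rings, *tx_rings;
        struct r_vector r_vecs[64];
};

/* Mirrors nfp_net_vector_assign_rings(): point the vector at ring 'idx',
 * or clear the pointer when idx is beyond the configured ring count. */
static void vector_assign_rings(struct net_sketch *nn,
                                struct r_vector *r_vec, unsigned int idx)
{
        r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
        r_vec->tx_ring = idx < nn->num_tx_rings ? &nn->tx_rings[idx] : NULL;
}

/* Callers (the open path and the ring-swap path) no longer patch links
 * one by one; they simply reinit every vector after the ring arrays
 * have been swapped. */
static void assign_all_rings(struct net_sketch *nn)
{
        unsigned int r;

        for (r = 0; r < nn->max_r_vecs; r++)
                vector_assign_rings(nn, &nn->r_vecs[r], r);
}

The effect, per the commit message, is that the swap functions stay focused on exchanging ring arrays and descriptor counts while link reinitialization lives in one place.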
@@ -1601,16 +1601,11 @@ nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s)
 static void
 nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
 {
-        struct nfp_net_tx_ring *rings = s->rings;
         struct nfp_net_ring_set new = *s;
-        unsigned int r;
 
         s->dcnt = nn->txd_cnt;
         s->rings = nn->tx_rings;
 
-        for (r = 0; r < nn->num_tx_rings; r++)
-                nn->tx_rings[r].r_vec->tx_ring = &rings[r];
-
         nn->txd_cnt = new.dcnt;
         nn->tx_rings = new.rings;
 }
@@ -1728,17 +1723,12 @@ nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s)
 static void
 nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
 {
-        struct nfp_net_rx_ring *rings = s->rings;
         struct nfp_net_ring_set new = *s;
-        unsigned int r;
 
         s->mtu = nn->netdev->mtu;
         s->dcnt = nn->rxd_cnt;
         s->rings = nn->rx_rings;
 
-        for (r = 0; r < nn->num_rx_rings; r++)
-                nn->rx_rings[r].r_vec->rx_ring = &rings[r];
-
         nn->netdev->mtu = new.mtu;
         nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
         nn->rxd_cnt = new.dcnt;
@@ -1759,6 +1749,14 @@ nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
         kfree(rings);
 }
 
+static void
+nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+                            int idx)
+{
+        r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+        r_vec->tx_ring = idx < nn->num_tx_rings ? &nn->tx_rings[idx] : NULL;
+}
+
 static int
 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                        int idx)
@@ -1766,20 +1764,6 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
         struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
         int err;
 
-        if (idx < nn->num_tx_rings) {
-                r_vec->tx_ring = &nn->tx_rings[idx];
-                nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
-        } else {
-                r_vec->tx_ring = NULL;
-        }
-
-        if (idx < nn->num_rx_rings) {
-                r_vec->rx_ring = &nn->rx_rings[idx];
-                nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
-        } else {
-                r_vec->rx_ring = NULL;
-        }
-
         snprintf(r_vec->name, sizeof(r_vec->name),
                  "%s-rxtx-%d", nn->netdev->name, idx);
         err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
@@ -2100,6 +2084,9 @@ static int nfp_net_netdev_open(struct net_device *netdev)
                 goto err_free_rx_rings;
         }
 
+        for (r = 0; r < nn->max_r_vecs; r++)
+                nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+
         err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
         if (err)
                 goto err_free_rings;
@@ -2247,11 +2234,16 @@ nfp_net_ring_swap_enable(struct nfp_net *nn,
                          struct nfp_net_ring_set *rx,
                          struct nfp_net_ring_set *tx)
 {
+        unsigned int r;
+
         if (rx)
                 nfp_net_rx_ring_set_swap(nn, rx);
         if (tx)
                 nfp_net_tx_ring_set_swap(nn, tx);
 
+        for (r = 0; r < nn->max_r_vecs; r++)
+                nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+
         return __nfp_net_set_config_and_enable(nn);
 }