Commit e5c5180a authored by David S. Miller

Merge branch 'nfp-ctrl-vNIC'

Jakub Kicinski says:

====================
nfp: ctrl vNIC

This series adds the ability to use one vNIC as a control channel
for passing messages to and from the application firmware.  The
implementation restructures the existing netdev vNIC code so that it
can deal with nfp_nets whose netdev pointer is NULL.  Control vNICs
are not visible to user space (other than for dumping ring state), and
since they have no netdevs we use a tasklet for RX and a simple skb
list for TX queuing.

Due to the special status of the control vNIC we have to reshuffle the
init code a bit to make sure the control vNIC is fully brought up
(and therefore communication with the app FW can happen) before any
netdev or port is visible to user space.

The FW designates which vNIC is to be used as the control one by
setting the _pf%u_net_ctrl_bar symbol.  Some FWs depend on metadata
being prepended to control messages, while others prefer to look at
the queue ID to decide that something is a control message.  Our
implementation can cater to both (the 8-byte prepend layout is
sketched just after this message).

The first two users of this code will be eBPF maps and flower
offloads (a sketch of how an app hooks into this follows below).
====================
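The 8-byte prepend mentioned above is written by nfp_ctrl_tx_one() and
validated by nfp_ctrl_meta_ok() in this series.  Purely as a sketch, its
layout looks like this (the struct is illustrative only; the driver
pushes and reads the two words directly):

    /* Illustrative only -- this struct does not exist in the driver. */
    struct nfp_ctrl_msg_prepend {
    	__be32 meta_type;	/* NFP_NET_META_PORTID (5) */
    	__be32 port_id;		/* NFP_META_PORT_ID_CTRL (~0U), i.e. "control" */
    };
    /* followed immediately by the control message payload */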
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2b30842b f9380629
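To illustrate how an application would plug into the hooks added below,
here is a hypothetical sketch.  Only the nfp_app_type fields and the
nfp_app_ctrl_* helpers come from this series; everything named
example_* is made up, and a real app would also set .id:

    static void example_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
    {
    	/* FW replies arrive here from the ctrl vNIC's RX tasklet */
    	dev_consume_skb_any(skb);
    }

    const struct nfp_app_type app_example = {
    	.name		= "example",
    	/* setting .ctrl_msg_rx makes the PF probe spawn a ctrl vNIC */
    	.ctrl_msg_rx	= example_ctrl_msg_rx,
    	/* set when the FW expects the type:5/port:CTRL prepend */
    	.ctrl_has_meta	= true,
    };

    static int example_send_request(struct nfp_app *app, void *req, unsigned int len)
    {
    	struct sk_buff *skb;

    	skb = nfp_app_ctrl_msg_alloc(app, len);	/* reserves meta headroom */
    	if (!skb)
    		return -ENOMEM;
    	memcpy(skb_put(skb, len), req, len);

    	/* hands the skb to ring 0 of the ctrl vNIC; a full ring is
    	 * handled by stashing the skb on the r_vector's skb list
    	 */
    	nfp_app_ctrl_tx(app, skb);
    	return 0;
    }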
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
+#include <linux/skbuff.h>
 #include <linux/slab.h>
 #include "nfpcore/nfp_cpp.h"
@@ -42,6 +43,23 @@ static const struct nfp_app_type *apps[] = {
 	&app_bpf,
 };
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
{
struct sk_buff *skb;
if (nfp_app_ctrl_has_meta(app))
size += 8;
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return NULL;
if (nfp_app_ctrl_has_meta(app))
skb_reserve(skb, 8);
return skb;
}
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{ {
struct nfp_app *app; struct nfp_app *app;
......
...@@ -37,7 +37,9 @@ ...@@ -37,7 +37,9 @@
struct bpf_prog; struct bpf_prog;
struct net_device; struct net_device;
struct pci_dev; struct pci_dev;
struct sk_buff;
struct tc_to_netdev; struct tc_to_netdev;
struct sk_buff;
struct nfp_app; struct nfp_app;
struct nfp_cpp; struct nfp_cpp;
struct nfp_pf; struct nfp_pf;
...@@ -55,12 +57,16 @@ extern const struct nfp_app_type app_bpf; ...@@ -55,12 +57,16 @@ extern const struct nfp_app_type app_bpf;
* struct nfp_app_type - application definition * struct nfp_app_type - application definition
* @id: application ID * @id: application ID
* @name: application name * @name: application name
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
* *
* Callbacks * Callbacks
* @init: perform basic app checks * @init: perform basic app checks
* @extra_cap: extra capabilities string * @extra_cap: extra capabilities string
* @vnic_init: init vNICs (assign port types, etc.) * @vnic_init: init vNICs (assign port types, etc.)
* @vnic_clean: clean up app's vNIC state * @vnic_clean: clean up app's vNIC state
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo * @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded) * @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program * @xdp_offload: offload an XDP program
...@@ -69,6 +75,8 @@ struct nfp_app_type { ...@@ -69,6 +75,8 @@ struct nfp_app_type {
enum nfp_app_id id; enum nfp_app_id id;
const char *name; const char *name;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app); int (*init)(struct nfp_app *app);
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn); const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
...@@ -77,6 +85,11 @@ struct nfp_app_type { ...@@ -77,6 +85,11 @@ struct nfp_app_type {
unsigned int id); unsigned int id);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn); void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
u32 handle, __be16 proto, struct tc_to_netdev *tc); u32 handle, __be16 proto, struct tc_to_netdev *tc);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
...@@ -89,6 +102,7 @@ struct nfp_app_type { ...@@ -89,6 +102,7 @@ struct nfp_app_type {
* @pdev: backpointer to PCI device * @pdev: backpointer to PCI device
* @pf: backpointer to NFP PF structure * @pf: backpointer to NFP PF structure
* @cpp: pointer to the CPP handle * @cpp: pointer to the CPP handle
* @ctrl: pointer to ctrl vNIC struct
* @type: pointer to const application ops and info * @type: pointer to const application ops and info
*/ */
struct nfp_app { struct nfp_app {
...@@ -96,9 +110,13 @@ struct nfp_app { ...@@ -96,9 +110,13 @@ struct nfp_app {
struct nfp_pf *pf; struct nfp_pf *pf;
struct nfp_cpp *cpp; struct nfp_cpp *cpp;
struct nfp_net *ctrl;
const struct nfp_app_type *type; const struct nfp_app_type *type;
}; };
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app) static inline int nfp_app_init(struct nfp_app *app)
{ {
if (!app->type->init) if (!app->type->init)
...@@ -118,6 +136,21 @@ static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn) ...@@ -118,6 +136,21 @@ static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
app->type->vnic_clean(app, nn); app->type->vnic_clean(app, nn);
} }
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
if (!app->type->start)
return 0;
return app->type->start(app);
}
static inline void nfp_app_stop(struct nfp_app *app)
{
if (!app->type->stop)
return;
app->type->stop(app);
}
static inline const char *nfp_app_name(struct nfp_app *app) static inline const char *nfp_app_name(struct nfp_app *app)
{ {
if (!app) if (!app)
...@@ -125,6 +158,16 @@ static inline const char *nfp_app_name(struct nfp_app *app) ...@@ -125,6 +158,16 @@ static inline const char *nfp_app_name(struct nfp_app *app)
return app->type->name; return app->type->name;
} }
static inline bool nfp_app_needs_ctrl_vnic(struct nfp_app *app)
{
return app && app->type->ctrl_msg_rx;
}
static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
{
return app->type->ctrl_has_meta;
}
static inline const char *nfp_app_extra_cap(struct nfp_app *app, static inline const char *nfp_app_extra_cap(struct nfp_app *app,
struct nfp_net *nn) struct nfp_net *nn)
{ {
...@@ -163,6 +206,18 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, ...@@ -163,6 +206,18 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog); return app->type->xdp_offload(app, nn, prog);
} }
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
app->type->ctrl_msg_rx(app, skb);
}
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id); struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app); void nfp_app_free(struct nfp_app *app);
......
@@ -63,12 +63,13 @@ struct nfp_nsp_identify;
  * @cpp:		Pointer to the CPP handle
  * @app:		Pointer to the APP handle
  * @data_vnic_bar:	Pointer to the CPP area for the data vNICs' BARs
- * @tx_area:		Pointer to the CPP area for the TX queues
- * @rx_area:		Pointer to the CPP area for the FL/RX queues
+ * @ctrl_vnic_bar:	Pointer to the CPP area for the ctrl vNIC's BAR
+ * @qc_area:		Pointer to the CPP area for the queues
  * @irq_entries:	Array of MSI-X entries for all vNICs
  * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
  * @num_vfs:		Number of SR-IOV VFs enabled
  * @fw_loaded:		Is the firmware loaded?
+ * @ctrl_vnic:		Pointer to the control vNIC if available
  * @eth_tbl:		NSP ETH table
  * @nspi:		NSP identification info
  * @hwmon_dev:		pointer to hwmon device
@@ -88,8 +89,8 @@ struct nfp_pf {
 	struct nfp_app *app;
 	struct nfp_cpp_area *data_vnic_bar;
-	struct nfp_cpp_area *tx_area;
-	struct nfp_cpp_area *rx_area;
+	struct nfp_cpp_area *ctrl_vnic_bar;
+	struct nfp_cpp_area *qc_area;
 	struct msix_entry *irq_entries;
@@ -98,6 +99,8 @@ struct nfp_pf {
 	bool fw_loaded;
+	struct nfp_net *ctrl_vnic;
+
 	struct nfp_eth_table *eth_tbl;
 	struct nfp_nsp_identify *nspi;
@@ -129,4 +132,6 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
 void
 nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id);
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
 #endif /* NFP_MAIN_H */
@@ -50,15 +50,32 @@
 #include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...)  netdev_err((nn)->dp.netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_pr(nn, lvl, fmt, args...)					\
+	({								\
+		struct nfp_net *__nn = (nn);				\
+									\
+		if (__nn->dp.netdev)					\
+			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
+		else							\
+			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
+	})
+
+#define nn_err(nn, fmt, args...)	nn_pr(nn, KERN_ERR, fmt, ## args)
+#define nn_warn(nn, fmt, args...)	nn_pr(nn, KERN_WARNING, fmt, ## args)
+#define nn_info(nn, fmt, args...)	nn_pr(nn, KERN_INFO, fmt, ## args)
+#define nn_dbg(nn, fmt, args...)	nn_pr(nn, KERN_DEBUG, fmt, ## args)
+
 #define nn_dp_warn(dp, fmt, args...)					\
-	do {								\
-		if (unlikely(net_ratelimit()))				\
-			netdev_warn((dp)->netdev, fmt, ## args);	\
-	} while (0)
+	({								\
+		struct nfp_net_dp *__dp = (dp);				\
+									\
+		if (unlikely(net_ratelimit())) {			\
+			if (__dp->netdev)				\
+				netdev_warn(__dp->netdev, fmt, ## args); \
+			else						\
+				dev_warn(__dp->dev, fmt, ## args);	\
+		}							\
+	})
 /* Max time to wait for NFP to respond on updates (in seconds) */
 #define NFP_NET_POLL_TIMEOUT	5
@@ -388,7 +405,14 @@ struct nfp_net_rx_ring {
  */
 struct nfp_net_r_vector {
 	struct nfp_net *nfp_net;
-	struct napi_struct napi;
+	union {
+		struct napi_struct napi;
+		struct {
+			struct tasklet_struct tasklet;
+			struct sk_buff_head queue;
+			struct spinlock lock;
+		};
+	};
 	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_rx_ring *rx_ring;
@@ -681,6 +705,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
  * either add to a pointer or to read the pointer value.
  */
 #define NFP_QCP_QUEUE_ADDR_SZ		0x800
+#define NFP_QCP_QUEUE_AREA_SZ		0x80000
 #define NFP_QCP_QUEUE_OFF(_x)		((_x) * NFP_QCP_QUEUE_ADDR_SZ)
 #define NFP_QCP_QUEUE_ADD_RPTR		0x0000
 #define NFP_QCP_QUEUE_ADD_WPTR		0x0004
...@@ -788,6 +813,22 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) ...@@ -788,6 +813,22 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR); return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
} }
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
WARN_ON_ONCE(!nn->dp.netdev && nn->port);
return !!nn->dp.netdev;
}
static inline bool nfp_net_running(struct nfp_net *nn)
{
return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}
static inline const char *nfp_net_name(struct nfp_net *nn)
{
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
/* Globals */ /* Globals */
extern const char nfp_driver_version[]; extern const char nfp_driver_version[];
...@@ -803,13 +844,16 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, ...@@ -803,13 +844,16 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar); void __iomem *ctrl_bar);
struct nfp_net * struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings); unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn); void nfp_net_free(struct nfp_net *nn);
int nfp_net_init(struct nfp_net *nn); int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn); void nfp_net_clean(struct nfp_net *nn);
int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev); void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn); void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update); int nfp_net_reconfig(struct nfp_net *nn, u32 update);
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <net/vxlan.h> #include <net/vxlan.h>
...@@ -392,6 +392,15 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) ...@@ -392,6 +392,15 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
struct nfp_net_r_vector *r_vec = data;
tasklet_schedule(&r_vec->tasklet);
return IRQ_HANDLED;
}
/** /**
* nfp_net_read_link_status() - Reread link status from control BAR * nfp_net_read_link_status() - Reread link status from control BAR
* @nn: NFP Network structure * @nn: NFP Network structure
...@@ -503,33 +512,6 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, ...@@ -503,33 +512,6 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
} }
/**
* nfp_net_vecs_init() - Assign IRQs and setup rvecs.
* @nn: NFP Network structure
*/
static void nfp_net_vecs_init(struct nfp_net *nn)
{
struct nfp_net_r_vector *r_vec;
int r;
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
for (r = 0; r < nn->max_r_vecs; r++) {
struct msix_entry *entry;
entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx;
r_vec->irq_entry = entry->entry;
r_vec->irq_vector = entry->vector;
cpumask_set_cpu(r, &r_vec->affinity_mask);
}
}
/** /**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN) * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure * @nn: NFP Network structure
@@ -550,7 +532,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 	entry = &nn->irq_entries[vector_idx];
-	snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+	snprintf(name, name_sz, format, nfp_net_name(nn));
 	err = request_irq(entry->vector, handler, 0, name, nn);
 	if (err) {
 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -970,6 +952,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 	r_vec->tx_pkts += done_pkts;
 	u64_stats_update_end(&r_vec->tx_sync);
+	if (!dp->netdev)
+		return;
+
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
 	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -1079,7 +1064,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->qcp_rd_p = 0;
 	tx_ring->wr_ptr_add = 0;
-	if (tx_ring->is_xdp)
+	if (tx_ring->is_xdp || !dp->netdev)
 		return;
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
...@@ -1769,9 +1754,272 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) ...@@ -1769,9 +1754,272 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
return pkts_polled; return pkts_polled;
} }
/* Control device data path
*/
static bool
nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old)
{
unsigned int real_len = skb->len, meta_len = 0;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
int wr_idx;
dp = &r_vec->nfp_net->dp;
tx_ring = r_vec->tx_ring;
if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
goto err_free;
}
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_busy++;
u64_stats_update_end(&r_vec->tx_sync);
if (!old)
__skb_queue_tail(&r_vec->queue, skb);
else
__skb_queue_head(&r_vec->queue, skb);
return true;
}
if (nfp_app_ctrl_has_meta(nn->app)) {
if (unlikely(skb_headroom(skb) < 8)) {
nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
goto err_free;
}
meta_len = 8;
put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
}
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
goto err_dma_warn;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
txbuf->skb = skb;
txbuf->dma_addr = dma_addr;
txbuf->fidx = -1;
txbuf->pkt_cnt = 1;
txbuf->real_len = real_len;
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
nfp_desc_set_dma_addr(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
txd->mss = 0;
txd->lso_hdrlen = 0;
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
nfp_net_tx_xmit_more_flush(tx_ring);
return false;
err_dma_warn:
nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
err_free:
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
dev_kfree_skb_any(skb);
return false;
}
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
bool ret;
spin_lock_bh(&r_vec->lock);
ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
spin_unlock_bh(&r_vec->lock);
return ret;
}
static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&r_vec->queue)))
if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
return;
}
static bool
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
{
u32 meta_type, meta_tag;
if (!nfp_app_ctrl_has_meta(nn->app))
return !meta_len;
if (meta_len != 8)
return false;
meta_type = get_unaligned_be32(data);
meta_tag = get_unaligned_be32(data + 4);
return (meta_type == NFP_NET_META_PORTID &&
meta_tag == NFP_META_PORT_ID_CTRL);
}
static bool
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
{
unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
dma_addr_t new_dma_addr;
struct sk_buff *skb;
void *new_frag;
int idx;
idx = D_IDX(rx_ring, rx_ring->rd_p);
rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
return false;
/* Memory barrier to ensure that we won't do other reads
* before the DD bit.
*/
dma_rmb();
rx_ring->rd_p++;
rxbuf = &rx_ring->rxbufs[idx];
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
pkt_off += meta_len;
else
pkt_off += dp->rx_offset;
meta_off = pkt_off - meta_len;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
meta_len);
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
return true;
}
skb = build_skb(rxbuf->frag, dp->fl_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
return true;
}
new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
return true;
}
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
nfp_app_ctrl_rx(nn->app, skb);
return true;
}
static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
struct nfp_net *nn = r_vec->nfp_net;
struct nfp_net_dp *dp = &nn->dp;
while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
continue;
}
static void nfp_ctrl_poll(unsigned long arg)
{
struct nfp_net_r_vector *r_vec = (void *)arg;
spin_lock_bh(&r_vec->lock);
nfp_net_tx_complete(r_vec->tx_ring);
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
nfp_ctrl_rx(r_vec);
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
}
/* Setup and Configuration /* Setup and Configuration
*/ */
/**
* nfp_net_vecs_init() - Assign IRQs and setup rvecs.
* @nn: NFP Network structure
*/
static void nfp_net_vecs_init(struct nfp_net *nn)
{
struct nfp_net_r_vector *r_vec;
int r;
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
for (r = 0; r < nn->max_r_vecs; r++) {
struct msix_entry *entry;
entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn;
r_vec->irq_entry = entry->entry;
r_vec->irq_vector = entry->vector;
if (nn->dp.netdev) {
r_vec->handler = nfp_net_irq_rxtx;
} else {
r_vec->handler = nfp_ctrl_irq_rxtx;
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
(unsigned long)r_vec);
tasklet_disable(&r_vec->tasklet);
}
cpumask_set_cpu(r, &r_vec->affinity_mask);
}
}
/** /**
* nfp_net_tx_ring_free() - Free resources allocated to a TX ring * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free * @tx_ring: TX ring to free
@@ -1820,7 +2068,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	if (!tx_ring->txbufs)
 		goto err_alloc;
-	if (!tx_ring->is_xdp)
+	if (!tx_ring->is_xdp && dp->netdev)
 		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
 				    tx_ring->idx);
@@ -2034,15 +2282,22 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	int err;
 	/* Setup NAPI */
-	netif_napi_add(nn->dp.netdev, &r_vec->napi,
-		       nfp_net_poll, NAPI_POLL_WEIGHT);
+	if (nn->dp.netdev)
+		netif_napi_add(nn->dp.netdev, &r_vec->napi,
+			       nfp_net_poll, NAPI_POLL_WEIGHT);
+	else
+		tasklet_enable(&r_vec->tasklet);
 	snprintf(r_vec->name, sizeof(r_vec->name),
-		 "%s-rxtx-%d", nn->dp.netdev->name, idx);
+		 "%s-rxtx-%d", nfp_net_name(nn), idx);
 	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
 			  r_vec);
 	if (err) {
-		netif_napi_del(&r_vec->napi);
+		if (nn->dp.netdev)
+			netif_napi_del(&r_vec->napi);
+		else
+			tasklet_disable(&r_vec->tasklet);
 		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
@@ -2060,7 +2315,11 @@ static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
 	irq_set_affinity_hint(r_vec->irq_vector, NULL);
-	netif_napi_del(&r_vec->napi);
+	if (nn->dp.netdev)
+		netif_napi_del(&r_vec->napi);
+	else
+		tasklet_disable(&r_vec->tasklet);
 	free_irq(r_vec->irq_vector, r_vec);
 }
@@ -2236,9 +2495,10 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
-	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
+	if (nn->dp.netdev)
+		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
-	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
 	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
 	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
@@ -2275,6 +2535,86 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
 	return 0;
 }
/**
* nfp_net_close_stack() - Quiesce the stack (part of close)
* @nn: NFP Net device to reconfigure
*/
static void nfp_net_close_stack(struct nfp_net *nn)
{
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->dp.netdev);
nn->link_up = false;
for (r = 0; r < nn->dp.num_r_vecs; r++) {
disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi);
}
netif_tx_disable(nn->dp.netdev);
}
/**
* nfp_net_close_free_all() - Free all runtime resources
* @nn: NFP Net device to reconfigure
*/
static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
nfp_net_tx_rings_free(&nn->dp);
nfp_net_rx_rings_free(&nn->dp);
for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}
/**
* nfp_net_netdev_close() - Called when the device is downed
* @netdev: netdev structure
*/
static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
nfp_net_close_stack(nn);
/* Step 2: Tell NFP
*/
nfp_net_clear_config_and_disable(nn);
/* Step 3: Free resources
*/
nfp_net_close_free_all(nn);
nn_dbg(nn, "%s down", netdev->name);
return 0;
}
void nfp_ctrl_close(struct nfp_net *nn)
{
int r;
rtnl_lock();
for (r = 0; r < nn->dp.num_r_vecs; r++) {
disable_irq(nn->r_vecs[r].irq_vector);
tasklet_disable(&nn->r_vecs[r].tasklet);
}
nfp_net_clear_config_and_disable(nn);
nfp_net_close_free_all(nn);
rtnl_unlock();
}
/** /**
* nfp_net_open_stack() - Start the device from stack's perspective * nfp_net_open_stack() - Start the device from stack's perspective
* @nn: NFP Net device to reconfigure * @nn: NFP Net device to reconfigure
...@@ -2294,16 +2634,10 @@ static void nfp_net_open_stack(struct nfp_net *nn) ...@@ -2294,16 +2634,10 @@ static void nfp_net_open_stack(struct nfp_net *nn)
nfp_net_read_link_status(nn); nfp_net_read_link_status(nn);
} }
static int nfp_net_netdev_open(struct net_device *netdev) static int nfp_net_open_alloc_all(struct nfp_net *nn)
{ {
struct nfp_net *nn = netdev_priv(netdev);
int err, r; int err, r;
/* Step 1: Allocate resources for rings and the like
* - Request interrupts
* - Allocate RX and TX ring resources
* - Setup initial RSS table
*/
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
nn->exn_name, sizeof(nn->exn_name), nn->exn_name, sizeof(nn->exn_name),
NFP_NET_IRQ_EXN_IDX, nn->exn_handler); NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
...@@ -2333,13 +2667,42 @@ static int nfp_net_netdev_open(struct net_device *netdev) ...@@ -2333,13 +2667,42 @@ static int nfp_net_netdev_open(struct net_device *netdev)
for (r = 0; r < nn->max_r_vecs; r++) for (r = 0; r < nn->max_r_vecs; r++)
nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
return 0;
err_free_rx_rings:
nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
return err;
}
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
int err;
/* Step 1: Allocate resources for rings and the like
* - Request interrupts
* - Allocate RX and TX ring resources
* - Setup initial RSS table
*/
err = nfp_net_open_alloc_all(nn);
if (err)
return err;
err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
if (err) if (err)
goto err_free_rings; goto err_free_all;
err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
if (err) if (err)
goto err_free_rings; goto err_free_all;
/* Step 2: Configure the NFP /* Step 2: Configure the NFP
* - Enable rings from 0 to tx_rings/rx_rings - 1. * - Enable rings from 0 to tx_rings/rx_rings - 1.
...@@ -2350,7 +2713,7 @@ static int nfp_net_netdev_open(struct net_device *netdev) ...@@ -2350,7 +2713,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
*/ */
err = nfp_net_set_config_and_enable(nn); err = nfp_net_set_config_and_enable(nn);
if (err) if (err)
goto err_free_rings; goto err_free_all;
/* Step 3: Enable for kernel /* Step 3: Enable for kernel
* - put some freelist descriptors on each RX ring * - put some freelist descriptors on each RX ring
...@@ -2362,89 +2725,38 @@ static int nfp_net_netdev_open(struct net_device *netdev) ...@@ -2362,89 +2725,38 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0; return 0;
err_free_rings: err_free_all:
nfp_net_tx_rings_free(&nn->dp); nfp_net_close_free_all(nn);
err_free_rx_rings:
nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
return err; return err;
} }
/** int nfp_ctrl_open(struct nfp_net *nn)
* nfp_net_close_stack() - Quiescent the stack (part of close)
* @nn: NFP Net device to reconfigure
*/
static void nfp_net_close_stack(struct nfp_net *nn)
{ {
unsigned int r; int err, r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->dp.netdev);
nn->link_up = false;
for (r = 0; r < nn->dp.num_r_vecs; r++) { /* ring dumping depends on vNICs being opened/closed under rtnl */
disable_irq(nn->r_vecs[r].irq_vector); rtnl_lock();
napi_disable(&nn->r_vecs[r].napi);
}
netif_tx_disable(nn->dp.netdev); err = nfp_net_open_alloc_all(nn);
} if (err)
goto err_unlock;
/** err = nfp_net_set_config_and_enable(nn);
* nfp_net_close_free_all() - Free all runtime resources if (err)
* @nn: NFP Net device to reconfigure goto err_free_all;
*/
static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
}
for (r = 0; r < nn->dp.num_tx_rings; r++) {
nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
}
for (r = 0; r < nn->dp.num_r_vecs; r++) for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); enable_irq(nn->r_vecs[r].irq_vector);
kfree(nn->dp.rx_rings);
kfree(nn->dp.tx_rings);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}
/**
* nfp_net_netdev_close() - Called when the device is downed
* @netdev: netdev structure
*/
static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
/* Step 1: Disable RX and TX rings from the Linux kernel perspective rtnl_unlock();
*/
nfp_net_close_stack(nn);
/* Step 2: Tell NFP return 0;
*/
nfp_net_clear_config_and_disable(nn);
/* Step 3: Free resources err_free_all:
*/
nfp_net_close_free_all(nn); nfp_net_close_free_all(nn);
err_unlock:
nn_dbg(nn, "%s down", netdev->name); rtnl_unlock();
return 0; return err;
} }
static void nfp_net_set_rx_mode(struct net_device *netdev) static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -3029,30 +3341,39 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
  *
  * This function allocates a netdev device and fills in the initial
- * part of the @struct nfp_net structure.
+ * part of the @struct nfp_net structure.  In case of control device
+ * nfp_net structure is allocated without the netdev.
  *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev,
+struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 			      unsigned int max_tx_rings,
 			      unsigned int max_rx_rings)
 {
-	struct net_device *netdev;
 	struct nfp_net *nn;
-	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
-				    max_tx_rings, max_rx_rings);
-	if (!netdev)
-		return ERR_PTR(-ENOMEM);
+	if (needs_netdev) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
+					    max_tx_rings, max_rx_rings);
+		if (!netdev)
+			return ERR_PTR(-ENOMEM);
-	SET_NETDEV_DEV(netdev, &pdev->dev);
-	nn = netdev_priv(netdev);
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+		nn = netdev_priv(netdev);
+		nn->dp.netdev = netdev;
+	} else {
+		nn = vzalloc(sizeof(*nn));
+		if (!nn)
+			return ERR_PTR(-ENOMEM);
+	}
-	nn->dp.netdev = netdev;
 	nn->dp.dev = &pdev->dev;
 	nn->pdev = pdev;
@@ -3086,7 +3407,10 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
-	free_netdev(nn->dp.netdev);
+	if (nn->dp.netdev)
+		free_netdev(nn->dp.netdev);
+	else
+		vfree(nn);
 }
/** /**
...@@ -3157,52 +3481,13 @@ static void nfp_net_irqmod_init(struct nfp_net *nn) ...@@ -3157,52 +3481,13 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
nn->tx_coalesce_max_frames = 64; nn->tx_coalesce_max_frames = 64;
} }
/** static void nfp_net_netdev_init(struct nfp_net *nn)
* nfp_net_init() - Initialise/finalise the nfp_net structure
* @nn: NFP Net device structure
*
* Return: 0 on success or negative errno on error.
*/
int nfp_net_init(struct nfp_net *nn)
{ {
struct net_device *netdev = nn->dp.netdev; struct net_device *netdev = nn->dp.netdev;
int err;
nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
/* Chained metadata is signalled by capabilities except in version 4 */
nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
/* Determine RX packet/metadata boundary offset */ netdev->mtu = nn->dp.mtu;
if (nn->fw_ver.major >= 2) {
u32 reg;
reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
if (reg > NFP_NET_MAX_PREPEND) {
nn_err(nn, "Invalid rx offset: %d\n", reg);
return -EINVAL;
}
nn->dp.rx_offset = reg;
} else {
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
netdev->mtu = nn->max_mtu;
else
netdev->mtu = NFP_NET_DEFAULT_MTU;
nn->dp.mtu = netdev->mtu;
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
/* Advertise/enable offloads based on capabilities /* Advertise/enable offloads based on capabilities
* *
...@@ -3232,12 +3517,8 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3232,12 +3517,8 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
NFP_NET_CFG_CTRL_LSO; NFP_NET_CFG_CTRL_LSO;
} }
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) { if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH; netdev->hw_features |= NETIF_F_RXHASH;
nfp_net_rss_init(nn);
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
NFP_NET_CFG_CTRL_RSS;
}
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) { nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO) if (nn->cap & NFP_NET_CFG_CTRL_LSO)
...@@ -3272,6 +3553,69 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3272,6 +3553,69 @@ int nfp_net_init(struct nfp_net *nn)
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
/* Finalise the netdev setup */
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
}
/**
* nfp_net_init() - Initialise/finalise the nfp_net structure
* @nn: NFP Net device structure
*
* Return: 0 on success or negative errno on error.
*/
int nfp_net_init(struct nfp_net *nn)
{
int err;
nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
/* Chained metadata is signalled by capabilities except in version 4 */
nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
!nn->dp.netdev ||
nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
/* Determine RX packet/metadata boundary offset */
if (nn->fw_ver.major >= 2) {
u32 reg;
reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
if (reg > NFP_NET_MAX_PREPEND) {
nn_err(nn, "Invalid rx offset: %d\n", reg);
return -EINVAL;
}
nn->dp.rx_offset = reg;
} else {
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
nn->dp.mtu = nn->max_mtu;
else
nn->dp.mtu = NFP_NET_DEFAULT_MTU;
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_init(nn);
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
NFP_NET_CFG_CTRL_RSS;
}
/* Allow L2 Broadcast and Multicast through by default, if supported */ /* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC) if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
...@@ -3284,6 +3628,9 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3284,6 +3628,9 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
} }
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
/* Stash the re-configuration queue away. First odd queue in TX Bar */ /* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
...@@ -3296,20 +3643,11 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3296,20 +3643,11 @@ int nfp_net_init(struct nfp_net *nn)
if (err) if (err)
return err; return err;
/* Finalise the netdev setup */
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
nfp_net_vecs_init(nn); nfp_net_vecs_init(nn);
return register_netdev(netdev); if (!nn->dp.netdev)
return 0;
return register_netdev(nn->dp.netdev);
} }
/** /**
...@@ -3318,6 +3656,9 @@ int nfp_net_init(struct nfp_net *nn) ...@@ -3318,6 +3656,9 @@ int nfp_net_init(struct nfp_net *nn)
*/ */
void nfp_net_clean(struct nfp_net *nn) void nfp_net_clean(struct nfp_net *nn)
{ {
if (!nn->dp.netdev)
return;
unregister_netdev(nn->dp.netdev); unregister_netdev(nn->dp.netdev);
if (nn->dp.xdp_prog) if (nn->dp.xdp_prog)
......
...@@ -71,8 +71,11 @@ ...@@ -71,8 +71,11 @@
#define NFP_NET_META_FIELD_SIZE 4 #define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */ #define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2 #define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */ #define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_META_PORT_ID_CTRL ~0U
/** /**
* Hash type pre-pended when a RSS hash was computed * Hash type pre-pended when a RSS hash was computed
*/ */
......
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
 		goto out;
 	nn = r_vec->nfp_net;
 	rx_ring = r_vec->rx_ring;
-	if (!netif_running(nn->dp.netdev))
+	if (!nfp_net_running(nn))
 		goto out;
 	rxd_cnt = rx_ring->cnt;
@@ -138,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
 	if (!r_vec->nfp_net || !tx_ring)
 		goto out;
 	nn = r_vec->nfp_net;
-	if (!netif_running(nn->dp.netdev))
+	if (!nfp_net_running(nn))
 		goto out;
 	txd_cnt = tx_ring->cnt;
@@ -209,7 +209,10 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
 	if (IS_ERR_OR_NULL(nfp_dir))
 		return;
-	sprintf(name, "vnic%d", id);
+	if (nfp_net_is_data_vnic(nn))
+		sprintf(name, "vnic%d", id);
+	else
+		strcpy(name, "ctrl-vnic");
 	nn->debugfs_dir = debugfs_create_dir(name, ddir);
 	if (IS_ERR_OR_NULL(nn->debugfs_dir))
 		return;
......
...@@ -223,65 +223,37 @@ static int nfp_net_pf_get_app_id(struct nfp_pf *pf) ...@@ -223,65 +223,37 @@ static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
NFP_APP_CORE_NIC); NFP_APP_CORE_NIC);
} }
static unsigned int static u8 __iomem *
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar, nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int stride, u32 start_off, u32 num_off) unsigned int min_size, struct nfp_cpp_area **area)
{ {
unsigned int i, min_qc, max_qc; const struct nfp_rtsym *sym;
min_qc = readl(ctrl_bar + start_off);
max_qc = min_qc;
for (i = 0; i < pf->max_data_vnics; i++) {
/* To make our lives simpler only accept configuration where
* queues are allocated to PFs in order (queues of PFn all have
* indexes lower than PFn+1).
*/
if (max_qc > readl(ctrl_bar + start_off))
return 0;
max_qc = readl(ctrl_bar + start_off);
max_qc += readl(ctrl_bar + num_off) * stride;
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
}
return max_qc - min_qc;
}
static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
const struct nfp_rtsym *ctrl_sym;
u8 __iomem *ctrl_bar;
char pf_symbol[256]; char pf_symbol[256];
u8 __iomem *mem;
snprintf(pf_symbol, sizeof(pf_symbol), "_pf%u_net_bar0", snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp)); nfp_cppcore_pcie_unit(pf->cpp));
ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol); sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
if (!ctrl_sym) { if (!sym) {
dev_err(&pf->pdev->dev, nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
"Failed to find PF BAR0 symbol %s\n", pf_symbol); return (u8 __iomem *)ERR_PTR(-ENOENT);
return NULL;
} }
if (ctrl_sym->size < pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE) { if (sym->size < min_size) {
dev_err(&pf->pdev->dev, nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
"PF BAR0 too small to contain %d vNICs\n", return (u8 __iomem *)ERR_PTR(-EINVAL);
pf->max_data_vnics);
return NULL;
} }
ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl", mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
ctrl_sym->domain, ctrl_sym->target, sym->addr, sym->size, area);
ctrl_sym->addr, ctrl_sym->size, if (IS_ERR(mem)) {
&pf->data_vnic_bar); nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
if (IS_ERR(ctrl_bar)) { pf_symbol, PTR_ERR(mem));
dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n", return mem;
PTR_ERR(ctrl_bar));
return NULL;
} }
return ctrl_bar; return mem;
} }
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
...@@ -294,45 +266,47 @@ static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) ...@@ -294,45 +266,47 @@ static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
static void nfp_net_pf_free_vnics(struct nfp_pf *pf) static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{ {
struct nfp_net *nn; struct nfp_net *nn, *next;
while (!list_empty(&pf->vnics)) { list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list); if (nfp_net_is_data_vnic(nn))
nfp_net_pf_free_vnic(pf, nn); nfp_net_pf_free_vnic(pf, nn);
}
} }
static struct nfp_net * static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, void __iomem *ctrl_bar, nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
void __iomem *tx_bar, void __iomem *rx_bar, void __iomem *ctrl_bar, void __iomem *qc_bar,
int stride, struct nfp_net_fw_version *fw_ver, int stride, unsigned int eth_id)
unsigned int eth_id)
{ {
u32 n_tx_rings, n_rx_rings; u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
struct nfp_net *nn; struct nfp_net *nn;
int err; int err;
tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */ /* Allocate and initialise the vNIC */
nn = nfp_net_alloc(pf->pdev, n_tx_rings, n_rx_rings); nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
if (IS_ERR(nn)) if (IS_ERR(nn))
return nn; return nn;
nn->app = pf->app; nn->app = pf->app;
nn->fw_ver = *fw_ver; nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->dp.ctrl_bar = ctrl_bar; nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar; nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = rx_bar; nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0; nn->dp.is_vf = 0;
nn->stride_rx = stride; nn->stride_rx = stride;
nn->stride_tx = stride; nn->stride_tx = stride;
err = nfp_app_vnic_init(pf->app, nn, eth_id); if (needs_netdev) {
if (err) { err = nfp_app_vnic_init(pf->app, nn, eth_id);
nfp_net_free(nn); if (err) {
return ERR_PTR(err); nfp_net_free(nn);
return ERR_PTR(err);
}
} }
pf->num_vnics++; pf->num_vnics++;
...@@ -376,27 +350,15 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) ...@@ -376,27 +350,15 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
static int static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar, void __iomem *qc_bar, int stride)
int stride, struct nfp_net_fw_version *fw_ver)
{ {
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
struct nfp_net *nn; struct nfp_net *nn;
unsigned int i; unsigned int i;
int err; int err;
prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
for (i = 0; i < pf->max_data_vnics; i++) { for (i = 0; i < pf->max_data_vnics; i++) {
tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); stride, i);
tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
nn = nfp_net_pf_alloc_vnic(pf, ctrl_bar, tx_bar, rx_bar,
stride, fw_ver, i);
if (IS_ERR(nn)) { if (IS_ERR(nn)) {
err = PTR_ERR(nn); err = PTR_ERR(nn);
goto err_free_prev; goto err_free_prev;
...@@ -430,21 +392,10 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn) ...@@ -430,21 +392,10 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
nfp_app_vnic_clean(pf->app, nn); nfp_app_vnic_clean(pf->app, nn);
} }
static int static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
void __iomem *ctrl_bar, void __iomem *tx_bar,
void __iomem *rx_bar, int stride,
struct nfp_net_fw_version *fw_ver)
{ {
unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left; unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
struct nfp_net *nn; struct nfp_net *nn;
int err;
/* Allocate the vnics and do basic init */
err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
stride, fw_ver);
if (err)
return err;
/* Get MSI-X vectors */ /* Get MSI-X vectors */
wanted_irqs = 0; wanted_irqs = 0;
...@@ -452,18 +403,16 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf, ...@@ -452,18 +403,16 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs; wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL); GFP_KERNEL);
if (!pf->irq_entries) { if (!pf->irq_entries)
err = -ENOMEM; return -ENOMEM;
goto err_nn_free;
}
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries, num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
NFP_NET_MIN_VNIC_IRQS * pf->num_vnics, NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
wanted_irqs); wanted_irqs);
if (!num_irqs) { if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
err = -ENOMEM; kfree(pf->irq_entries);
goto err_vec_free; return -ENOMEM;
} }
/* Distribute IRQs to vNICs */ /* Distribute IRQs to vNICs */
...@@ -472,16 +421,34 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf, ...@@ -472,16 +421,34 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
list_for_each_entry(nn, &pf->vnics, vnic_list) { list_for_each_entry(nn, &pf->vnics, vnic_list) {
unsigned int n; unsigned int n;
n = DIV_ROUND_UP(irqs_left, vnics_left); n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
DIV_ROUND_UP(irqs_left, vnics_left));
nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left], nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
n); n);
irqs_left -= n; irqs_left -= n;
vnics_left--; vnics_left--;
} }
return 0;
}
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
nfp_net_irqs_disable(pf->pdev);
kfree(pf->irq_entries);
}
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
struct nfp_net *nn;
unsigned int id;
int err;
/* Finish vNIC init and register */ /* Finish vNIC init and register */
id = 0; id = 0;
list_for_each_entry(nn, &pf->vnics, vnic_list) { list_for_each_entry(nn, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
err = nfp_net_pf_init_vnic(pf, nn, id); err = nfp_net_pf_init_vnic(pf, nn, id);
if (err) if (err)
goto err_prev_deinit; goto err_prev_deinit;
...@@ -493,17 +460,15 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf, ...@@ -493,17 +460,15 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
err_prev_deinit: err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list) list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
nfp_net_pf_clean_vnic(pf, nn); if (nfp_net_is_data_vnic(nn))
nfp_net_irqs_disable(pf->pdev); nfp_net_pf_clean_vnic(pf, nn);
err_vec_free:
kfree(pf->irq_entries);
err_nn_free:
nfp_net_pf_free_vnics(pf);
return err; return err;
} }
static int nfp_net_pf_app_init(struct nfp_pf *pf) static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{ {
u8 __iomem *ctrl_bar;
int err; int err;
pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf)); pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
...@@ -514,8 +479,28 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf) ...@@ -514,8 +479,28 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf)
if (err) if (err)
goto err_free; goto err_free;
if (!nfp_app_needs_ctrl_vnic(pf->app))
return 0;
ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
NFP_PF_CSR_SLICE_SIZE,
&pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
err = PTR_ERR(ctrl_bar);
goto err_free;
}
pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
stride, 0);
if (IS_ERR(pf->ctrl_vnic)) {
err = PTR_ERR(pf->ctrl_vnic);
goto err_unmap;
}
return 0; return 0;
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_free: err_free:
nfp_app_free(pf->app); nfp_app_free(pf->app);
return err; return err;
...@@ -523,21 +508,79 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf) ...@@ -523,21 +508,79 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf)
static void nfp_net_pf_app_clean(struct nfp_pf *pf) static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{ {
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
nfp_app_free(pf->app); nfp_app_free(pf->app);
pf->app = NULL; pf->app = NULL;
} }
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
int err;
if (!pf->ctrl_vnic)
return 0;
err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
if (err)
return err;
err = nfp_ctrl_open(pf->ctrl_vnic);
if (err)
goto err_clean_ctrl;
return 0;
err_clean_ctrl:
nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
return err;
}
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
if (!pf->ctrl_vnic)
return;
nfp_ctrl_close(pf->ctrl_vnic);
nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
err = nfp_net_pf_app_start_ctrl(pf);
if (err)
return err;
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
goto err_ctrl_stop;
return 0;
err_ctrl_stop:
nfp_net_pf_app_stop_ctrl(pf);
return err;
}
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
nfp_app_stop(pf->app);
nfp_net_pf_app_stop_ctrl(pf);
}
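The start/stop pair above encodes the ordering the series depends on: the control vNIC is brought up before the application logic starts, and it is torn down only after the app has stopped. A short sketch of that pairing with hypothetical stub steps standing in for the real helpers (the simulated app_start() failure is only there to show the unwind):

    #include <stdio.h>

    /* hypothetical stand-ins for the ctrl vNIC and app start/stop steps */
    static int  ctrl_start(void) { puts("ctrl vNIC up");   return 0; }
    static void ctrl_stop(void)  { puts("ctrl vNIC down"); }
    static int  app_start(void)  { puts("app started");    return -1; }
    static void app_stop(void)   { puts("app stopped"); }

    static int pf_app_start(void)
    {
        int err = ctrl_start();         /* control channel first */

        if (err)
            return err;

        err = app_start();
        if (err)
            goto err_ctrl_stop;
        return 0;

    err_ctrl_stop:
        ctrl_stop();                    /* unwind in reverse order */
        return err;
    }

    static void pf_app_stop(void)
    {
        app_stop();                     /* app first, then its control channel */
        ctrl_stop();
    }

    int main(void)
    {
        if (pf_app_start())
            return 1;
        pf_app_stop();
        return 0;
    }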
static void nfp_net_pci_remove_finish(struct nfp_pf *pf) static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{ {
nfp_net_pf_app_stop(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir); nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_irqs_disable(pf->pdev); nfp_net_pf_free_irqs(pf);
kfree(pf->irq_entries);
nfp_net_pf_app_clean(pf); nfp_net_pf_app_clean(pf);
nfp_cpp_area_release_free(pf->rx_area); nfp_cpp_area_release_free(pf->qc_area);
nfp_cpp_area_release_free(pf->tx_area);
nfp_cpp_area_release_free(pf->data_vnic_bar); nfp_cpp_area_release_free(pf->data_vnic_bar);
} }
...@@ -661,11 +704,9 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) ...@@ -661,11 +704,9 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/ */
int nfp_net_pci_probe(struct nfp_pf *pf) int nfp_net_pci_probe(struct nfp_pf *pf)
{ {
u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
u32 total_tx_qcs, total_rx_qcs;
struct nfp_net_fw_version fw_ver; struct nfp_net_fw_version fw_ver;
u32 tx_area_sz, rx_area_sz; u8 __iomem *ctrl_bar, *qc_bar;
u32 start_q; u32 ctrl_bar_sz;
int stride; int stride;
int err; int err;
...@@ -684,9 +725,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf) ...@@ -684,9 +725,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_unlock; goto err_unlock;
} }
ctrl_bar = nfp_net_pf_map_ctrl_bar(pf); ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
if (!ctrl_bar) { ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; ctrl_bar_sz, &pf->data_vnic_bar);
if (IS_ERR(ctrl_bar)) {
err = PTR_ERR(ctrl_bar);
if (!pf->fw_loaded && err == -ENOENT)
err = -EPROBE_DEFER;
goto err_unlock; goto err_unlock;
} }
...@@ -704,7 +749,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) ...@@ -704,7 +749,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
} else { } else {
switch (fw_ver.major) { switch (fw_ver.major) {
case 1 ... 4: case 1 ... 5:
stride = 4; stride = 4;
break; break;
default: default:
...@@ -716,67 +761,54 @@ int nfp_net_pci_probe(struct nfp_pf *pf) ...@@ -716,67 +761,54 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
} }
} }
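The only functional change in this version switch is accepting firmware ABI major 5 alongside 1-4, with the same stride value of 4. A tiny sketch of that check, using the same GCC case-range extension the driver relies on; the helper name and the -1 return for unknown ABIs are illustrative only:

    #include <stdio.h>

    /* illustrative helper; the driver does this inline and errors out on unknown ABIs */
    static int fw_stride(unsigned char major)
    {
        switch (major) {
        case 1 ... 5:       /* ABI 5 is now accepted with the same stride as 1-4 */
            return 4;
        default:
            return -1;
        }
    }

    int main(void)
    {
        printf("ABI 5 stride: %d, ABI 6 stride: %d\n", fw_stride(5), fw_stride(6));
        return 0;
    }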
/* Find how many QC structs need to be mapped */ /* Map queues */
total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
NFP_NET_CFG_START_TXQ, NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
NFP_NET_CFG_MAX_TXRINGS); &pf->qc_area);
total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride, if (IS_ERR(qc_bar)) {
NFP_NET_CFG_START_RXQ, nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
NFP_NET_CFG_MAX_RXRINGS); err = PTR_ERR(qc_bar);
if (!total_tx_qcs || !total_rx_qcs) {
nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
total_tx_qcs, total_rx_qcs);
err = -EINVAL;
goto err_ctrl_unmap;
}
tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
/* Map TX queues */
start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
NFP_PCIE_QUEUE(start_q),
tx_area_sz, &pf->tx_area);
if (IS_ERR(tx_bar)) {
nfp_err(pf->cpp, "Failed to map TX area.\n");
err = PTR_ERR(tx_bar);
goto err_ctrl_unmap; goto err_ctrl_unmap;
} }
/* Map RX queues */ err = nfp_net_pf_app_init(pf, qc_bar, stride);
start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
NFP_PCIE_QUEUE(start_q),
rx_area_sz, &pf->rx_area);
if (IS_ERR(rx_bar)) {
nfp_err(pf->cpp, "Failed to map RX area.\n");
err = PTR_ERR(rx_bar);
goto err_unmap_tx;
}
err = nfp_net_pf_app_init(pf);
if (err) if (err)
goto err_unmap_rx; goto err_unmap_qc;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev); pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar, /* Allocate the vnics and do basic init */
stride, &fw_ver); err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
if (err) if (err)
goto err_clean_ddir; goto err_clean_ddir;
err = nfp_net_pf_alloc_irqs(pf);
if (err)
goto err_free_vnics;
err = nfp_net_pf_app_start(pf);
if (err)
goto err_free_irqs;
err = nfp_net_pf_init_vnics(pf);
if (err)
goto err_stop_app;
mutex_unlock(&pf->lock); mutex_unlock(&pf->lock);
return 0; return 0;
err_stop_app:
nfp_net_pf_app_stop(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir: err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir); nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_app_clean(pf); nfp_net_pf_app_clean(pf);
err_unmap_rx: err_unmap_qc:
nfp_cpp_area_release_free(pf->rx_area); nfp_cpp_area_release_free(pf->qc_area);
err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap: err_ctrl_unmap:
nfp_cpp_area_release_free(pf->data_vnic_bar); nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock: err_unlock:
...@@ -793,7 +825,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf) ...@@ -793,7 +825,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
goto out; goto out;
list_for_each_entry(nn, &pf->vnics, vnic_list) list_for_each_entry(nn, &pf->vnics, vnic_list)
nfp_net_pf_clean_vnic(pf, nn); if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
nfp_net_pf_free_vnics(pf); nfp_net_pf_free_vnics(pf);
......
...@@ -161,7 +161,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -161,7 +161,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n"); dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else { } else {
switch (fw_ver.major) { switch (fw_ver.major) {
case 1 ... 4: case 1 ... 5:
stride = 4; stride = 4;
tx_bar_no = NFP_NET_Q0_BAR; tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = tx_bar_no; rx_bar_no = tx_bar_no;
...@@ -202,7 +202,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -202,7 +202,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq); rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */ /* Allocate and initialise the netdev */
nn = nfp_net_alloc(pdev, max_tx_rings, max_rx_rings); nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) { if (IS_ERR(nn)) {
err = PTR_ERR(nn); err = PTR_ERR(nn);
goto err_ctrl_unmap; goto err_ctrl_unmap;
......
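The VF probe picks up the same ABI-5 acceptance and the extra boolean that nfp_net_alloc() now takes; the VF passes true since its vNIC is user visible. A toy userspace sketch of what such a flag implies for the allocator, purely illustrative (the parameter name needs_netdev is an assumption, and the real function builds a struct net_device through the kernel's netdev API, which is not reproduced here):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* toy model: a vNIC that may or may not be backed by a netdev */
    struct netdev { char name[16]; };
    struct nn     { struct netdev *netdev; int num_tx, num_rx; };

    static struct nn *nn_alloc(bool needs_netdev, int tx, int rx)
    {
        struct nn *nn = calloc(1, sizeof(*nn));

        if (!nn)
            return NULL;
        if (needs_netdev) {             /* only user-visible vNICs get a netdev */
            nn->netdev = calloc(1, sizeof(*nn->netdev));
            if (!nn->netdev) {
                free(nn);
                return NULL;
            }
        }
        nn->num_tx = tx;
        nn->num_rx = rx;
        return nn;
    }

    int main(void)
    {
        struct nn *data = nn_alloc(true, 8, 8);    /* VF/data vNIC */
        struct nn *ctrl = nn_alloc(false, 1, 1);   /* ctrl vNIC: no netdev */

        printf("data has netdev: %d, ctrl has netdev: %d\n",
               data && data->netdev != NULL, ctrl && ctrl->netdev != NULL);
        return 0;
    }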