Commit 6e1a2882 authored by David S. Miller

Merge branch 'nfp-TC-block-fixes-app-fallback-and-dev_alloc'

Jakub Kicinski says:

====================
nfp: TC block fixes, app fallback and dev_alloc()

This series has three parts.  First of all, John and I fix some
fallout from the TC block conversion.  John also fixes sleeping
in the neigh notifier.

Secondly, I reorganise the nfp_app table to make it easier to
exclude apps with unmet Kconfig dependencies.

Last but not least, after the fixes which went into -net some time
ago, I refactor the page allocation, add an ethtool counter for
failed allocations, and clean up the ethtool stats code while at it.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cdc89c91 18f76191
@@ -27,8 +27,6 @@ nfp-objs := \
     nfp_net_sriov.o \
     nfp_netvf_main.o \
     nfp_port.o \
-    bpf/main.o \
-    bpf/offload.o \
     nic/main.o

 ifeq ($(CONFIG_NFP_APP_FLOWER),y)
@@ -44,6 +42,8 @@ endif
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 nfp-objs += \
+    bpf/main.o \
+    bpf/offload.o \
     bpf/verifier.o \
     bpf/jit.o
 endif
......
@@ -130,6 +130,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
         cls_bpf->common.protocol != htons(ETH_P_ALL) ||
         cls_bpf->common.chain_index)
             return -EOPNOTSUPP;
+        if (nn->dp.bpf_offload_xdp)
+            return -EBUSY;
+
         return nfp_net_bpf_offload(nn, cls_bpf);
     default:
         return -EOPNOTSUPP;
......
@@ -150,9 +150,6 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
     unsigned int max_mtu;
     int ret;

-    if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
-        return -EOPNOTSUPP;
-
     ret = nfp_net_bpf_get_act(nn, cls_bpf);
     if (ret < 0)
         return ret;
......
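With bpf/main.o and bpf/offload.o now compiled only under CONFIG_BPF_SYSCALL
(see the Makefile hunk above), the runtime IS_ENABLED() guard removed here
became redundant.  A minimal sketch of the two styles of Kconfig gating,
for illustration only (the function name is hypothetical):

    #include <linux/errno.h>
    #include <linux/kconfig.h>

    /* Style 1: always compile the file, guard at run time.  IS_ENABLED()
     * expands to a compile-time constant, so the compiler folds the
     * branch away in builds without BPF.
     */
    int feature_offload_prepare(void)
    {
        if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
            return -EOPNOTSUPP;
        return 0;   /* real preparation work would go here */
    }

    /* Style 2 (what this series switches to): list the object file in
     * the Makefile only when the dependency is met and drop the guard;
     * the code simply does not exist in builds without BPF.
     */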
@@ -50,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)

 struct sk_buff *
 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-                      enum nfp_flower_cmsg_type_port type)
+                      enum nfp_flower_cmsg_type_port type, gfp_t flag)
 {
     struct nfp_flower_cmsg_hdr *ch;
     struct sk_buff *skb;

     size += NFP_FLOWER_CMSG_HLEN;

-    skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+    skb = nfp_app_ctrl_msg_alloc(app, size, flag);
     if (!skb)
         return NULL;
@@ -78,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
     unsigned int size;

     size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
-    skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+    skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+                                GFP_KERNEL);
     if (!skb)
         return NULL;
@@ -109,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
     struct sk_buff *skb;

     skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
-                                NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+                                NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
     if (!skb)
         return -ENOMEM;
......
@@ -458,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
 struct sk_buff *
 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-                      enum nfp_flower_cmsg_type_port type);
+                      enum nfp_flower_cmsg_type_port type, gfp_t flag);

 #endif
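Threading a gfp_t through nfp_flower_cmsg_alloc() lets each call site state
whether it may sleep.  A hedged sketch of the convention (caller names are
hypothetical; alloc_skb() is the real kernel API):

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    /* Process context: a sleeping allocation is fine. */
    static struct sk_buff *ctrl_msg_from_work(unsigned int size)
    {
        return alloc_skb(size, GFP_KERNEL);
    }

    /* Notifier/softirq context: must not sleep, so use GFP_ATOMIC. */
    static struct sk_buff *ctrl_msg_from_notifier(unsigned int size)
    {
        return alloc_skb(size, GFP_ATOMIC);
    }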
@@ -115,7 +115,7 @@ struct nfp_flower_priv {
     struct mutex nfp_mac_off_lock;
     struct mutex nfp_mac_index_lock;
     struct mutex nfp_ipv4_off_lock;
-    struct mutex nfp_neigh_off_lock;
+    spinlock_t nfp_neigh_off_lock;
     struct ida nfp_mac_off_ids;
     int nfp_mac_off_count;
     struct notifier_block nfp_tun_mac_nb;
......
@@ -95,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
     nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
     nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

-    skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+    skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
     if (!skb)
         return -ENOMEM;
@@ -468,14 +468,14 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                         void *type_data, void *cb_priv)
 {
-    struct nfp_net *nn = cb_priv;
+    struct nfp_repr *repr = cb_priv;

-    if (!tc_can_offload(nn->dp.netdev))
+    if (!tc_can_offload(repr->netdev))
         return -EOPNOTSUPP;

     switch (type) {
     case TC_SETUP_CLSFLOWER:
-        return nfp_flower_repr_offload(nn->app, nn->port->netdev,
+        return nfp_flower_repr_offload(repr->app, repr->netdev,
                                        type_data);
     default:
         return -EOPNOTSUPP;
@@ -485,7 +485,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                      struct tc_block_offload *f)
 {
-    struct nfp_net *nn = netdev_priv(netdev);
+    struct nfp_repr *repr = netdev_priv(netdev);

     if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
         return -EOPNOTSUPP;
@@ -494,11 +494,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
     case TC_BLOCK_BIND:
         return tcf_block_cb_register(f->block,
                                      nfp_flower_setup_tc_block_cb,
-                                     nn, nn);
+                                     repr, repr);
     case TC_BLOCK_UNBIND:
         tcf_block_cb_unregister(f->block,
                                 nfp_flower_setup_tc_block_cb,
-                                nn);
+                                repr);
         return 0;
     default:
         return -EOPNOTSUPP;
......
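The bug fixed above: flower binds TC blocks on representor netdevs, whose
netdev_priv() is a struct nfp_repr, so registering the callback with a
struct nfp_net pointer handed the callback the wrong type.  A sketch of the
cb_priv round-trip that tcf_block_cb_register() guarantees (my_repr and the
function names are invented stand-ins, the TC APIs are real):

    #include <linux/netdevice.h>
    #include <net/pkt_cls.h>

    struct my_repr { struct net_device *netdev; };  /* stand-in type */

    static int my_block_cb(enum tc_setup_type type, void *type_data,
                           void *cb_priv)
    {
        /* cb_priv comes back verbatim, so it must be cast to the same
         * type that was registered below.
         */
        struct my_repr *repr = cb_priv;

        if (!tc_can_offload(repr->netdev))
            return -EOPNOTSUPP;
        return 0;   /* real code dispatches on type here */
    }

    static int my_setup_block(struct net_device *netdev,
                              struct tc_block_offload *f)
    {
        struct my_repr *repr = netdev_priv(netdev);

        if (f->command == TC_BLOCK_BIND)
            return tcf_block_cb_register(f->block, my_block_cb,
                                         repr, repr);
        return -EOPNOTSUPP;
    }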
@@ -224,12 +224,13 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
 }

 static int
-nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata)
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+                         gfp_t flag)
 {
     struct sk_buff *skb;
     unsigned char *msg;

-    skb = nfp_flower_cmsg_alloc(app, plen, mtype);
+    skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
     if (!skb)
         return -ENOMEM;
@@ -246,15 +247,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
     struct nfp_ipv4_route_entry *entry;
     struct list_head *ptr, *storage;

-    mutex_lock(&priv->nfp_neigh_off_lock);
+    spin_lock_bh(&priv->nfp_neigh_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
         entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
         if (entry->ipv4_addr == ipv4_addr) {
-            mutex_unlock(&priv->nfp_neigh_off_lock);
+            spin_unlock_bh(&priv->nfp_neigh_off_lock);
             return true;
         }
     }
-    mutex_unlock(&priv->nfp_neigh_off_lock);
+    spin_unlock_bh(&priv->nfp_neigh_off_lock);
     return false;
 }
@@ -264,24 +265,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
     struct nfp_ipv4_route_entry *entry;
     struct list_head *ptr, *storage;

-    mutex_lock(&priv->nfp_neigh_off_lock);
+    spin_lock_bh(&priv->nfp_neigh_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
         entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
         if (entry->ipv4_addr == ipv4_addr) {
-            mutex_unlock(&priv->nfp_neigh_off_lock);
+            spin_unlock_bh(&priv->nfp_neigh_off_lock);
             return;
         }
     }
-    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+    entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
     if (!entry) {
-        mutex_unlock(&priv->nfp_neigh_off_lock);
+        spin_unlock_bh(&priv->nfp_neigh_off_lock);
         nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
         return;
     }
     entry->ipv4_addr = ipv4_addr;
     list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
-    mutex_unlock(&priv->nfp_neigh_off_lock);
+    spin_unlock_bh(&priv->nfp_neigh_off_lock);
 }

 static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -290,7 +291,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
     struct nfp_ipv4_route_entry *entry;
     struct list_head *ptr, *storage;

-    mutex_lock(&priv->nfp_neigh_off_lock);
+    spin_lock_bh(&priv->nfp_neigh_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
         entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
         if (entry->ipv4_addr == ipv4_addr) {
@@ -299,12 +300,12 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
             break;
         }
     }
-    mutex_unlock(&priv->nfp_neigh_off_lock);
+    spin_unlock_bh(&priv->nfp_neigh_off_lock);
 }
 static void
 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-                    struct flowi4 *flow, struct neighbour *neigh)
+                    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
     struct nfp_tun_neigh payload;
@@ -334,7 +335,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 send_msg:
     nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                              sizeof(struct nfp_tun_neigh),
-                             (unsigned char *)&payload);
+                             (unsigned char *)&payload, flag);
 }
 static int
@@ -385,7 +386,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 #endif
     flow.flowi4_proto = IPPROTO_UDP;
-    nfp_tun_write_neigh(n->dev, app, &flow, n);
+    nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
     return NOTIFY_OK;
 }
@@ -423,7 +424,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
     ip_rt_put(rt);
     if (!n)
         goto route_fail_warning;
-    nfp_tun_write_neigh(n->dev, app, &flow, n);
+    nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
     neigh_release(n);
     return;
@@ -456,7 +457,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
     nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                              sizeof(struct nfp_tun_ipv4_addr),
-                             &payload);
+                             &payload, GFP_KERNEL);
 }
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
@@ -548,7 +549,7 @@ void nfp_tunnel_write_macs(struct nfp_app *app)
     }

     err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
-                                   pay_size, payload);
+                                   pay_size, payload, GFP_KERNEL);

     kfree(payload);
@@ -729,7 +730,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
     INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

     /* Initialise priv data for neighbour offloading. */
-    mutex_init(&priv->nfp_neigh_off_lock);
+    spin_lock_init(&priv->nfp_neigh_off_lock);
     INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
     priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
@@ -769,43 +770,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
     unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

     /* Free any memory that may be occupied by MAC list. */
-    mutex_lock(&priv->nfp_mac_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
         mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
                                list);
         list_del(&mac_entry->list);
         kfree(mac_entry);
     }
-    mutex_unlock(&priv->nfp_mac_off_lock);

     /* Free any memory that may be occupied by MAC index list. */
-    mutex_lock(&priv->nfp_mac_index_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
         mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
                              list);
         list_del(&mac_idx->list);
         kfree(mac_idx);
     }
-    mutex_unlock(&priv->nfp_mac_index_lock);

     ida_destroy(&priv->nfp_mac_off_ids);

     /* Free any memory that may be occupied by ipv4 list. */
-    mutex_lock(&priv->nfp_ipv4_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
         ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
         list_del(&ip_entry->list);
         kfree(ip_entry);
     }
-    mutex_unlock(&priv->nfp_ipv4_off_lock);

     /* Free any memory that may be occupied by the route list. */
-    mutex_lock(&priv->nfp_neigh_off_lock);
     list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
         route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
                                  list);
         list_del(&route_entry->list);
         kfree(route_entry);
     }
-    mutex_unlock(&priv->nfp_neigh_off_lock);
 }
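The conversions above exist because nfp_tun_neigh_event_handler() runs off
the netevent notifier chain, which can fire from atomic (softirq) context:
taking a mutex or allocating with GFP_KERNEL there may sleep, which is a bug.
The teardown hunk can drop locking entirely because the notifier is
unregistered before the lists are freed.  A condensed sketch of the safe
pattern, mirroring nfp_tun_add_route_to_cache() (names simplified):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct route_entry {
        __be32 ipv4_addr;
        struct list_head list;
    };

    static DEFINE_SPINLOCK(route_lock);
    static LIST_HEAD(route_list);

    /* Callable from process *and* softirq context: spin_lock_bh()
     * never sleeps, and GFP_ATOMIC allocates without sleeping.
     */
    static void cache_route(__be32 addr)
    {
        struct route_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
            return;

        entry->ipv4_addr = addr;
        spin_lock_bh(&route_lock);
        list_add_tail(&entry->list, &route_list);
        spin_unlock_bh(&route_lock);
    }

Note that call sites known to be process context, such as
nfp_tunnel_request_route(), still pass GFP_KERNEL, so only the notifier
path pays the atomic-allocation cost.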
@@ -43,10 +43,14 @@
 #include "nfp_net_repr.h"

 static const struct nfp_app_type *apps[] = {
-    &app_nic,
-    &app_bpf,
+    [NFP_APP_CORE_NIC]   = &app_nic,
+#ifdef CONFIG_BPF_SYSCALL
+    [NFP_APP_BPF_NIC]    = &app_bpf,
+#else
+    [NFP_APP_BPF_NIC]    = &app_nic,
+#endif
 #ifdef CONFIG_NFP_APP_FLOWER
-    &app_flower,
+    [NFP_APP_FLOWER_NIC] = &app_flower,
 #endif
 };
@@ -116,17 +120,13 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
 struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
 {
     struct nfp_app *app;
-    unsigned int i;

-    for (i = 0; i < ARRAY_SIZE(apps); i++)
-        if (apps[i]->id == id)
-            break;
-    if (i == ARRAY_SIZE(apps)) {
+    if (id >= ARRAY_SIZE(apps) || !apps[id]) {
         nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
         return ERR_PTR(-EINVAL);
     }

-    if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc))
+    if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
         return ERR_PTR(-EINVAL);

     app = kzalloc(sizeof(*app), GFP_KERNEL);
@@ -136,7 +136,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
     app->pf = pf;
     app->cpp = pf->cpp;
     app->pdev = pf->pdev;
-    app->type = apps[i];
+    app->type = apps[id];

     return app;
 }
......
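With designated initializers the apps[] table is indexed directly by
enum nfp_app_id, so nfp_app_alloc() replaces the linear search with a
bounds-plus-NULL check, and an app with unmet Kconfig dependencies can
alias a fallback entry.  A simplified sketch of the idiom (all types and
IDs below are invented for illustration):

    #include <linux/kernel.h>   /* ARRAY_SIZE() */

    enum my_app_id {
        MY_APP_CORE_NIC = 0x0,
        MY_APP_BPF_NIC  = 0x2,
    };

    struct my_app_type { const char *name; };

    static const struct my_app_type core_nic = { .name = "nic" };

    /* Designated initializers leave unlisted slots NULL, so an app
     * compiled out with no fallback is rejected at lookup time, while
     * one with a fallback simply aliases another entry.
     */
    static const struct my_app_type *apps[] = {
        [MY_APP_CORE_NIC] = &core_nic,
        [MY_APP_BPF_NIC]  = &core_nic,  /* fallback when BPF is off */
    };

    static const struct my_app_type *find_app(enum my_app_id id)
    {
        if (id >= ARRAY_SIZE(apps) || !apps[id])
            return NULL;    /* unknown or compiled-out app */
        return apps[id];
    }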
@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
  * @tx_lso:         Counter of LSO packets sent
  * @tx_errors:      How many TX errors were encountered
  * @tx_busy:        How often was TX busy (no space)?
+ * @rx_replace_buf_alloc_fail:  Counter of RX buffer allocation failures
  * @irq_vector:     Interrupt vector number (use for talking to the OS)
  * @handler:        Interrupt handler for this ring vector
  * @name:           Name of the interrupt vector
@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
     u64 hw_csum_tx_inner;
     u64 tx_gather;
     u64 tx_lso;
+
+    u64 rx_replace_buf_alloc_fail;
     u64 tx_errors;
     u64 tx_busy;
......
@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
     if (!dp->xdp_prog) {
         frag = napi_alloc_frag(dp->fl_bufsz);
+        if (unlikely(!frag))
+            return NULL;
     } else {
         struct page *page;

-        page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-        frag = page ? page_address(page) : NULL;
-    }
-    if (!frag) {
-        nn_dp_warn(dp, "Failed to alloc receive page frag\n");
-        return NULL;
+        page = dev_alloc_page();
+        if (unlikely(!page))
+            return NULL;
+
+        frag = page_address(page);
     }

     *dma_addr = nfp_net_dma_map_rx(dp, frag);
@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 {
     u64_stats_update_begin(&r_vec->rx_sync);
     r_vec->rx_drops++;
+    /* If we have both skb and rxbuf the replacement buffer allocation
+     * must have failed, count this as an alloc failure.
+     */
+    if (skb && rxbuf)
+        r_vec->rx_replace_buf_alloc_fail++;
     u64_stats_update_end(&r_vec->rx_sync);

     /* skb is build based on the frag, free_skb() would free the frag
......
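dev_alloc_page() is the stock networking helper for RX buffer pages; it
applies GFP flags appropriate for a receive refill path internally (atomic,
no OOM warning splat, and related flags, with the exact mask varying by
kernel version), so the driver no longer open-codes
alloc_page(GFP_ATOMIC | __GFP_COLD).  A sketch of the resulting early-return
shape (names simplified, not the driver's code):

    #include <linux/mm.h>       /* page_address() */
    #include <linux/skbuff.h>   /* napi_alloc_frag(), dev_alloc_page() */

    static void *alloc_rx_frag(bool xdp, unsigned int bufsz)
    {
        struct page *page;

        if (!xdp)
            return napi_alloc_frag(bufsz);  /* page frag for normal RX */

        /* XDP needs a whole page per buffer */
        page = dev_alloc_page();
        if (unlikely(!page))
            return NULL;
        return page_address(page);
    }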
@@ -181,7 +181,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
 #define NN_ET_SWITCH_STATS_LEN 9
-#define NN_ET_RVEC_GATHER_STATS 7
+#define NN_RVEC_GATHER_STATS   8
+#define NN_RVEC_PER_Q_STATS    3

 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
 {
@@ -427,7 +428,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
 {
     struct nfp_net *nn = netdev_priv(netdev);

-    return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
+    return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
 }

 static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -444,6 +445,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
     data = nfp_pr_et(data, "hw_rx_csum_ok");
     data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
     data = nfp_pr_et(data, "hw_rx_csum_err");
+    data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
     data = nfp_pr_et(data, "hw_tx_csum");
     data = nfp_pr_et(data, "hw_tx_inner_csum");
     data = nfp_pr_et(data, "tx_gather");
@@ -454,9 +456,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 {
-    u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
+    u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
     struct nfp_net *nn = netdev_priv(netdev);
-    u64 tmp[NN_ET_RVEC_GATHER_STATS];
+    u64 tmp[NN_RVEC_GATHER_STATS];
     unsigned int i, j;

     for (i = 0; i < nn->dp.num_r_vecs; i++) {
@@ -468,25 +470,26 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
             tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
             tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
             tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
+            tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
         } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

         do {
             start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
             data[1] = nn->r_vecs[i].tx_pkts;
             data[2] = nn->r_vecs[i].tx_busy;
-            tmp[3] = nn->r_vecs[i].hw_csum_tx;
-            tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
-            tmp[5] = nn->r_vecs[i].tx_gather;
-            tmp[6] = nn->r_vecs[i].tx_lso;
+            tmp[4] = nn->r_vecs[i].hw_csum_tx;
+            tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
+            tmp[6] = nn->r_vecs[i].tx_gather;
+            tmp[7] = nn->r_vecs[i].tx_lso;
         } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

-        data += 3;
+        data += NN_RVEC_PER_Q_STATS;

-        for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+        for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
             gathered_stats[j] += tmp[j];
     }

-    for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+    for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
         *data++ = gathered_stats[j];

     return data;
......
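The per-ring loop above relies on the u64_stats seqcount pattern: the reader
snapshots a group of u64 counters and retries if a writer updated them in
between, which keeps 64-bit counters consistent on 32-bit systems without
locking the hot path.  A minimal sketch of the reader side (the u64_stats
API is the real kernel one, the stats struct is simplified):

    #include <linux/u64_stats_sync.h>

    struct ring_stats {
        struct u64_stats_sync sync;
        u64 pkts;
        u64 bytes;
    };

    /* Reader: snapshot the counters; retry if a writer raced with us. */
    static void read_stats(struct ring_stats *rs, u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&rs->sync);
            *pkts  = rs->pkts;
            *bytes = rs->bytes;
        } while (u64_stats_fetch_retry(&rs->sync, start));
    }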