Commit b6ed55cb authored by David S. Miller

Merge branch 'nfp-Flower-flow-merging'

Simon Horman says:

====================
nfp: Flower flow merging

John Hurley says,

These patches deal with 'implicit recirculation' on the NFP. This is a
firmware feature whereby a packet egresses to an 'internal' port, meaning
that it recirculates back to the header extract phase with the 'internal'
port now marked as its ingress port. This internal port can then be
matched on by another rule. This process simulates how the OvS datapath
outputs to an internal port. The FW traces the packet's recirculation
route and sends a 'merge hint' to the driver telling it which flows the
packet matched against. The driver can then decide if these flows can be
merged into a single rule and offloaded.

The patches deal with the following issues:

- assigning/freeing IDs to/from each of these new internal ports
- offloading rules that match on internal ports
- offloading neighbour table entries whose egress port is internal
- handling fallback traffic with an internal port as ingress
- using merge hints to create 'faster path' flows and tracking stats etc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 47a1a225 8af56f40
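
For orientation before the diff: the mergeability test the driver applies when it receives a merge hint boils down to a subset check - every field the post-recirculation flow (sub_flow2) matches on must either be matched on by the pre-recirculation flow (sub_flow1) or be rewritten by one of sub_flow1's set actions. A rough, self-contained sketch of that check follows; it is illustrative only, all names in it are made up, and it is not part of the patch (the real check is nfp_flower_can_merge() further down).

    /* Illustrative only: a packet that hit flow1 and recirculates is
     * guaranteed to hit flow2 only if flow2's match mask is a subset of
     * the bits flow1 matched on or rewrote. Mirrors the bitmap_andnot()
     * test in nfp_flower_can_merge().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MERGE_WORDS 4 /* stand-in for the words of match/mask data */

    static int can_merge(const uint64_t f1_match_or_set[MERGE_WORDS],
                         const uint64_t f2_match[MERGE_WORDS])
    {
            int i;

            for (i = 0; i < MERGE_WORDS; i++)
                    if (f2_match[i] & ~f1_match_or_set[i])
                            return 0; /* flow2 needs a bit flow1 cannot guarantee */
            return 1;
    }

    int main(void)
    {
            uint64_t f1[MERGE_WORDS]     = { 0xffffffff00000000ULL, ~0ULL, 0, 0 };
            uint64_t f2_ok[MERGE_WORDS]  = { 0xffff000000000000ULL, 0, 0, 0 };
            uint64_t f2_bad[MERGE_WORDS] = { 0, 0, 0x1, 0 };

            printf("mergeable: %d\n", can_merge(f1, f2_ok));  /* prints 1 */
            printf("mergeable: %d\n", can_merge(f1, f2_bad)); /* prints 0 */
            return 0;
    }

In the driver the per-field bits come from the sub-flow's mask data plus the masks of its set actions, but the containment test itself is the same.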
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
}
}

- static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+ static struct net_device *
+ nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type rtype;
struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
.eswitch_mode_get = nfp_abm_eswitch_mode_get,
.eswitch_mode_set = nfp_abm_eswitch_mode_set,
- .repr_get = nfp_abm_repr_get,
+ .dev_get = nfp_abm_repr_get,
};
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
rtnl_lock();
rcu_read_lock();
- netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();

if (!netdev) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
msg = nfp_flower_cmsg_get_data(skb);

rcu_read_lock();
- exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();

if (!exists) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -204,6 +204,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
wake_up(&priv->reify_wait_queue);
}
static void
nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
{
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_flower_cmsg_merge_hint *msg;
struct nfp_fl_payload *sub_flows[2];
int err, i, flow_cnt;
msg = nfp_flower_cmsg_get_data(skb);
/* msg->count starts at 0 and always assumes at least 1 entry. */
flow_cnt = msg->count + 1;
if (msg_len < struct_size(msg, flow, flow_cnt)) {
nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %ld\n",
msg_len, struct_size(msg, flow, flow_cnt));
return;
}
if (flow_cnt != 2) {
nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
flow_cnt);
return;
}
rtnl_lock();
for (i = 0; i < flow_cnt; i++) {
u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
if (!sub_flows[i]) {
nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
goto err_rtnl_unlock;
}
}
err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
/* Only warn on memory fail. Hint veto will not break functionality. */
if (err == -ENOMEM)
nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
err_rtnl_unlock:
rtnl_unlock();
}

static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
@@ -222,6 +266,12 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ nfp_flower_cmsg_merge_hint_rx(app, skb);
+ break;
+ }
+ goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
@@ -235,6 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
/* fall through */
default:
+ err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
......
@@ -402,11 +402,13 @@ struct nfp_flower_cmsg_hdr {
/* Types defined for port related control messages */
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
@@ -451,6 +453,16 @@ struct nfp_flower_cmsg_portreify {

#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
/* NFP_FLOWER_CMSG_TYPE_FLOW_MERGE_HINT */
struct nfp_flower_cmsg_merge_hint {
u8 reserved[3];
u8 count;
struct {
__be32 host_ctx;
__be64 host_cookie;
} __packed flow[0];
};

enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -473,6 +485,13 @@ enum nfp_flower_cmsg_port_vnic_type {
#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)

+ static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+ {
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+ }
+
static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
{
return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
......
@@ -22,6 +22,9 @@

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

+ #define NFP_MIN_INT_PORT_ID 1
+ #define NFP_MAX_INT_PORT_ID 256
+
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
struct net_device *netdev)
{
struct net_device *entry;
int i, id = 0;
rcu_read_lock();
idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
if (entry == netdev) {
id = i;
break;
}
rcu_read_unlock();
return id;
}
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
int id;
id = nfp_flower_lookup_internal_port_id(priv, netdev);
if (id > 0)
return id;
idr_preload(GFP_ATOMIC);
spin_lock_bh(&priv->internal_ports.lock);
id = idr_alloc(&priv->internal_ports.port_ids, netdev,
NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
spin_unlock_bh(&priv->internal_ports.lock);
idr_preload_end();
return id;
}
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev)
{
int ext_port;
if (nfp_netdev_is_nfp_repr(netdev)) {
return nfp_repr_get_port_id(netdev);
} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
ext_port = nfp_flower_get_internal_port_id(app, netdev);
if (ext_port < 0)
return 0;
return nfp_flower_internal_port_get_port_id(ext_port);
}
return 0;
}
static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
struct nfp_flower_priv *priv = app->priv;
struct net_device *netdev;
rcu_read_lock();
netdev = idr_find(&priv->internal_ports.port_ids, port_id);
rcu_read_unlock();
return netdev;
}
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
int id;
id = nfp_flower_lookup_internal_port_id(priv, netdev);
if (!id)
return;
spin_lock_bh(&priv->internal_ports.lock);
idr_remove(&priv->internal_ports.port_ids, id);
spin_unlock_bh(&priv->internal_ports.lock);
}
static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event)
{
if (event == NETDEV_UNREGISTER &&
nfp_flower_internal_port_can_offload(app, netdev))
nfp_flower_free_internal_port_id(app, netdev);
return NOTIFY_OK;
}
static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
spin_lock_init(&priv->internal_ports.lock);
idr_init(&priv->internal_ports.port_ids);
}
static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
idr_destroy(&priv->internal_ports.port_ids);
}

static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
}

static struct net_device *
- nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+ nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type repr_type;
struct nfp_reprs *reprs;
u8 port = 0;

+ /* Check if the port is internal. */
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+ if (redir_egress)
+ *redir_egress = true;
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+ return nfp_flower_get_netdev_from_internal_port_id(app, port);
+ }
+
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
if (repr_type > NFP_REPR_TYPE_MAX)
return NULL;
@@ -641,11 +760,30 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
/* Tell the firmware that the driver supports flow merging. */
err = nfp_rtsym_write_le(app->pf->rtbl,
"_abi_flower_merge_hint_enable", 1);
if (!err) {
app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
nfp_flower_internal_port_init(app_priv);
} else if (err == -ENOENT) {
nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
} else {
goto err_lag_clean;
}
} else {
nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
}

INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);

return 0;

+ err_lag_clean:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
@@ -664,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);

+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ nfp_flower_internal_port_cleanup(app_priv);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -762,6 +903,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
if (ret & NOTIFY_STOP_MASK)
return ret;

+ ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
@@ -800,7 +945,7 @@ const struct nfp_app_type app_flower = {
.sriov_disable = nfp_flower_sriov_disable,

.eswitch_mode_get = eswitch_mode_get,
- .repr_get = nfp_flower_repr_get,
+ .dev_get = nfp_flower_dev_get,
.setup_tc = nfp_flower_setup_tc,
};
@@ -39,6 +39,8 @@ struct nfp_app;
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
+ #define NFP_FL_FEATS_FLOW_MOD BIT(5)
+ #define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)

struct nfp_fl_mask_id {
@@ -114,6 +116,16 @@ struct nfp_fl_lag {
struct sk_buff_head retrans_skbs;
};
/**
* struct nfp_fl_internal_ports - Flower APP priv data for additional ports
* @port_ids: Assignment of ids to any additional ports
* @lock: Lock for extra ports list
*/
struct nfp_fl_internal_ports {
struct idr port_ids;
spinlock_t lock;
};

/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
@@ -128,6 +140,7 @@ struct nfp_fl_lag {
* @flow_table: Hash table used to store flower rules
* @stats: Stored stats updates for flower rules
* @stats_lock: Lock for flower rule stats updates
+ * @stats_ctx_table: Hash table to map stats contexts to its flow rule
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -143,6 +156,7 @@ struct nfp_fl_lag {
* @non_repr_priv: List of offloaded non-repr ports and their priv data
* @active_mem_unit: Current active memory unit for flower rules
* @total_mem_units: Total number of available memory units for flower rules
+ * @internal_ports: Internal port ids used in offloaded rules
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -157,6 +171,7 @@ struct nfp_flower_priv {
struct rhashtable flow_table;
struct nfp_fl_stats *stats;
spinlock_t stats_lock; /* lock stats */
+ struct rhashtable stats_ctx_table;
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -169,6 +184,7 @@ struct nfp_flower_priv {
struct list_head non_repr_priv;
unsigned int active_mem_unit;
unsigned int total_mem_units;
+ struct nfp_fl_internal_ports internal_ports;
};

/**
@@ -236,6 +252,25 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
struct list_head linked_flows;
bool in_hw;
};
struct nfp_fl_payload_link {
/* A link contains a pointer to a merge flow and an associated sub_flow.
* Each merge flow will feature in 2 links to its underlying sub_flows.
* A sub_flow will have at least 1 link to a merge flow or more if it
* has been used to create multiple merge flows.
*
* For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
* all links to sub_flows (sub_flow.flow) via merge.list.
* For a sub_flow, 'linked_flows' gives all links to merge flows it has
* formed (merge_flow.flow) via sub_flow.list.
*/
struct {
struct list_head list;
struct nfp_fl_payload *flow;
} merge_flow, sub_flow;
};

extern const struct rhashtable_params nfp_flower_table_params;
@@ -247,12 +282,40 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
struct net_device *netdev)
{
struct nfp_flower_priv *app_priv = app->priv;
if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
return false;
if (!netdev->rtnl_link_ops)
return false;
if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
return true;
return false;
}
/* The address of the merged flow acts as its cookie.
* Cookies supplied to us by TC flower are also addresses to allocated
* memory and thus this scheme should not generate any collisions.
*/
static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
{
return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}

int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);

int nfp_flower_compile_flow_match(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
@@ -267,6 +330,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev);
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
@@ -274,6 +339,8 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev);
struct nfp_fl_payload *
+ nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+ struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -311,4 +378,6 @@ void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+ u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev);

#endif
@@ -326,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- u32 cmsg_port = 0;
+ u32 port_id;
int err;
u8 *ext;
u8 *msk;

- if (nfp_netdev_is_nfp_repr(netdev))
- cmsg_port = nfp_repr_get_port_id(netdev);
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -358,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- cmsg_port, false, tun_type);
+ port_id, false, tun_type);
if (err)
return err;

/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- cmsg_port, true, tun_type);
+ port_id, true, tun_type);
if (err)
return err;
......
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
unsigned long cookie;
};
struct nfp_fl_stats_ctx_to_flow {
struct rhash_head ht_node;
u32 stats_cxt;
struct nfp_fl_payload *flow;
};
static const struct rhashtable_params stats_ctx_table_params = {
.key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
.head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
.key_len = sizeof(u32),
};

static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;

- if (meta_flags)
- *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev)
{
struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
u8 new_mask_id;
u32 stats_cxt;
+ int err;

- if (nfp_get_stats_entry(app, &stats_cxt))
- return -ENOENT;
+ err = nfp_get_stats_entry(app, &stats_cxt);
+ if (err)
+ return err;

nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->ingress_dev = netdev;
ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
err = -ENOMEM;
goto err_release_stats;
}
ctx_entry->stats_cxt = stats_cxt;
ctx_entry->flow = nfp_flow;
if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
stats_ctx_table_params)) {
err = -ENOMEM;
goto err_free_ctx_entry;
}

new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
- return -ENOENT;
+ err = -ENOENT;
+ goto err_remove_rhash;
}

nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app,

check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
-
- if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id))
- return -EINVAL;
-
- return -EEXIST;
+ err = -EEXIST;
+ goto err_remove_mask;
}

return 0;

err_remove_mask:
nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
NULL, &new_mask_id);
err_remove_rhash:
WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
&ctx_entry->ht_node,
stats_ctx_table_params));
err_free_ctx_entry:
kfree(ctx_entry);
err_release_stats:
nfp_release_stats_entry(app, stats_cxt);
return err;
}
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
struct nfp_fl_payload *nfp_flow)
{
nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
priv->flower_version++;
}

int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
u8 new_mask_id = 0;
u32 temp_ctx_id;

+ __nfp_modify_flow_metadata(priv, nfp_flow);
+
nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
&new_mask_id);

- nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
- priv->flower_version++;
-
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

- /* Release the stats ctx id. */
+ /* Release the stats ctx id and ctx to flow table entry. */
temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
stats_ctx_table_params);
if (!ctx_entry)
return -ENOENT;
WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
&ctx_entry->ht_node,
stats_ctx_table_params));
kfree(ctx_entry);

return nfp_release_stats_entry(app, temp_ctx_id);
}
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
stats_ctx_table_params);
if (!ctx_entry)
return NULL;
return ctx_entry->flow;
}

static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
return err;
err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
if (err)
goto err_free_flow_table;

get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_flow_table;
+ goto err_free_stats_ctx_table;

priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -447,6 +516,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_stats_ctx_table:
rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
@@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)

rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->stats_ctx_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
......
@@ -55,6 +55,28 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_TP | \
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
struct nfp_flower_merge_check {
union {
struct {
__be16 tci;
struct nfp_flower_mac_mpls l2;
struct nfp_flower_tp_ports l4;
union {
struct nfp_flower_ipv4 ipv4;
struct nfp_flower_ipv6 ipv6;
};
};
unsigned long vals[8];
};
};

static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
@@ -326,7 +348,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;

case cpu_to_be16(ETH_P_IPV6):
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -376,6 +398,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)

flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
+ INIT_LIST_HEAD(&flow_pay->linked_flows);
+ flow_pay->in_hw = false;

return flow_pay;
@@ -388,6 +412,446 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
return NULL;
}

static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
struct nfp_flower_merge_check *merge,
u8 *last_act_id, int *act_out)
{
struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
struct nfp_fl_set_ip4_addrs *ipv4_add;
struct nfp_fl_set_ipv6_addr *ipv6_add;
struct nfp_fl_push_vlan *push_vlan;
struct nfp_fl_set_tport *tport;
struct nfp_fl_set_eth *eth;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
u8 act_id = 0;
u8 *ports;
int i;
while (act_off < flow->meta.act_len) {
a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
act_id = a->jump_id;
switch (act_id) {
case NFP_FL_ACTION_OPCODE_OUTPUT:
if (act_out)
(*act_out)++;
break;
case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
push_vlan = (struct nfp_fl_push_vlan *)a;
if (push_vlan->vlan_tci)
merge->tci = cpu_to_be16(0xffff);
break;
case NFP_FL_ACTION_OPCODE_POP_VLAN:
merge->tci = cpu_to_be16(0);
break;
case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
/* New tunnel header means l2 to l4 can be matched. */
eth_broadcast_addr(&merge->l2.mac_dst[0]);
eth_broadcast_addr(&merge->l2.mac_src[0]);
memset(&merge->l4, 0xff,
sizeof(struct nfp_flower_tp_ports));
memset(&merge->ipv4, 0xff,
sizeof(struct nfp_flower_ipv4));
break;
case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
eth = (struct nfp_fl_set_eth *)a;
for (i = 0; i < ETH_ALEN; i++)
merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
for (i = 0; i < ETH_ALEN; i++)
merge->l2.mac_src[i] |=
eth->eth_addr_mask[ETH_ALEN + i];
break;
case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
break;
case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
break;
case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
for (i = 0; i < 4; i++)
merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
ipv6_add->ipv6[i].mask;
break;
case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
for (i = 0; i < 4; i++)
merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
ipv6_add->ipv6[i].mask;
break;
case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
merge->ipv6.ip_ext.ttl |=
ipv6_tc_hl_fl->ipv6_hop_limit_mask;
merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
merge->ipv6.ipv6_flow_label_exthdr |=
ipv6_tc_hl_fl->ipv6_label_mask;
break;
case NFP_FL_ACTION_OPCODE_SET_UDP:
case NFP_FL_ACTION_OPCODE_SET_TCP:
tport = (struct nfp_fl_set_tport *)a;
ports = (u8 *)&merge->l4.port_src;
for (i = 0; i < 4; i++)
ports[i] |= tport->tp_port_mask[i];
break;
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
case NFP_FL_ACTION_OPCODE_PRE_LAG:
case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
break;
default:
return -EOPNOTSUPP;
}
act_off += a->len_lw << NFP_FL_LW_SIZ;
}
if (last_act_id)
*last_act_id = act_id;
return 0;
}
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
struct nfp_flower_merge_check *merge,
bool extra_fields)
{
struct nfp_flower_meta_tci *meta_tci;
u8 *mask = flow->mask_data;
u8 key_layer, match_size;
memset(merge, 0, sizeof(struct nfp_flower_merge_check));
meta_tci = (struct nfp_flower_meta_tci *)mask;
key_layer = meta_tci->nfp_flow_key_layer;
if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
return -EOPNOTSUPP;
merge->tci = meta_tci->tci;
mask += sizeof(struct nfp_flower_meta_tci);
if (key_layer & NFP_FLOWER_LAYER_EXT_META)
mask += sizeof(struct nfp_flower_ext_meta);
mask += sizeof(struct nfp_flower_in_port);
if (key_layer & NFP_FLOWER_LAYER_MAC) {
match_size = sizeof(struct nfp_flower_mac_mpls);
memcpy(&merge->l2, mask, match_size);
mask += match_size;
}
if (key_layer & NFP_FLOWER_LAYER_TP) {
match_size = sizeof(struct nfp_flower_tp_ports);
memcpy(&merge->l4, mask, match_size);
mask += match_size;
}
if (key_layer & NFP_FLOWER_LAYER_IPV4) {
match_size = sizeof(struct nfp_flower_ipv4);
memcpy(&merge->ipv4, mask, match_size);
}
if (key_layer & NFP_FLOWER_LAYER_IPV6) {
match_size = sizeof(struct nfp_flower_ipv6);
memcpy(&merge->ipv6, mask, match_size);
}
return 0;
}
static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
/* Two flows can be merged if sub_flow2 only matches on bits that are
* either matched by sub_flow1 or set by a sub_flow1 action. This
* ensures that every packet that hits sub_flow1 and recirculates is
* guaranteed to hit sub_flow2.
*/
struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
int err, act_out = 0;
u8 last_act_id = 0;
err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
true);
if (err)
return err;
err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
false);
if (err)
return err;
err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
&last_act_id, &act_out);
if (err)
return err;
/* Must only be 1 output action and it must be the last in sequence. */
if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
return -EOPNOTSUPP;
/* Reject merge if sub_flow2 matches on something that is not matched
* on or set in an action by sub_flow1.
*/
err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
sub_flow1_merge.vals,
sizeof(struct nfp_flower_merge_check) * 8);
if (err)
return -EINVAL;
return 0;
}
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
bool *tunnel_act)
{
unsigned int act_off = 0, act_len;
struct nfp_fl_act_head *a;
u8 act_id = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&act_src[act_off];
act_len = a->len_lw << NFP_FL_LW_SIZ;
act_id = a->jump_id;
switch (act_id) {
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
if (tunnel_act)
*tunnel_act = true;
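/* fall through */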
case NFP_FL_ACTION_OPCODE_PRE_LAG:
memcpy(act_dst + act_off, act_src + act_off, act_len);
break;
default:
return act_off;
}
act_off += act_len;
}
return act_off;
}
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
return -EOPNOTSUPP;
act_off += a->len_lw << NFP_FL_LW_SIZ;
}
return 0;
}
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2,
struct nfp_fl_payload *merge_flow)
{
unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
bool tunnel_act = false;
char *merge_act;
int err;
/* The last action of sub_flow1 must be output - do not merge this. */
sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
sub2_act_len = sub_flow2->meta.act_len;
if (!sub2_act_len)
return -EINVAL;
if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
return -EINVAL;
/* A shortcut can only be applied if there is a single action. */
if (sub1_act_len)
merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
else
merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
merge_act = merge_flow->action_data;
/* Copy any pre-actions to the start of merge flow action list. */
pre_off1 = nfp_flower_copy_pre_actions(merge_act,
sub_flow1->action_data,
sub1_act_len, &tunnel_act);
merge_act += pre_off1;
sub1_act_len -= pre_off1;
pre_off2 = nfp_flower_copy_pre_actions(merge_act,
sub_flow2->action_data,
sub2_act_len, NULL);
merge_act += pre_off2;
sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
* a tunnel, sub_flow 2 can only have output actions for a valid merge.
*/
if (tunnel_act) {
char *post_tun_acts = &sub_flow2->action_data[pre_off2];
err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
if (err)
return err;
}
/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
merge_act += sub1_act_len;
memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
return 0;
}
/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
list_del(&link->merge_flow.list);
list_del(&link->sub_flow.list);
kfree(link);
}
static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
struct nfp_fl_payload *sub_flow)
{
struct nfp_fl_payload_link *link;
list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
if (link->sub_flow.flow == sub_flow) {
nfp_flower_unlink_flow(link);
return;
}
}
static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
struct nfp_fl_payload *sub_flow)
{
struct nfp_fl_payload_link *link;
link = kmalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
link->merge_flow.flow = merge_flow;
list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
link->sub_flow.flow = sub_flow;
list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
return 0;
}
/**
* nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
* @app: Pointer to the APP handle
* @sub_flow1: Initial flow matched to produce merge hint
* @sub_flow2: Post recirculation flow matched in merge hint
*
* Combines 2 flows (if valid) to a single flow, removing the initial from hw
* and offloading the new, merged flow.
*
* Return: negative value on error, 0 in success.
*/
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
struct tc_cls_flower_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
int err;
ASSERT_RTNL();
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
err = nfp_flower_can_merge(sub_flow1, sub_flow2);
if (err)
return err;
merge_key_ls.key_size = sub_flow1->meta.key_len;
merge_flow = nfp_flower_allocate_new(&merge_key_ls);
if (!merge_flow)
return -ENOMEM;
merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
merge_flow->ingress_dev = sub_flow1->ingress_dev;
memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
sub_flow1->meta.key_len);
memcpy(merge_flow->mask_data, sub_flow1->mask_data,
sub_flow1->meta.mask_len);
err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
if (err)
goto err_destroy_merge_flow;
err = nfp_flower_link_flows(merge_flow, sub_flow1);
if (err)
goto err_destroy_merge_flow;
err = nfp_flower_link_flows(merge_flow, sub_flow2);
if (err)
goto err_unlink_sub_flow1;
merge_tc_off.cookie = merge_flow->tc_flower_cookie;
err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
merge_flow->ingress_dev);
if (err)
goto err_unlink_sub_flow2;
err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
nfp_flower_table_params);
if (err)
goto err_release_metadata;
err = nfp_flower_xmit_flow(app, merge_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
if (err)
goto err_remove_rhash;
merge_flow->in_hw = true;
sub_flow1->in_hw = false;
return 0;
err_remove_rhash:
WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
&merge_flow->fl_node,
nfp_flower_table_params));
err_release_metadata:
nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
kfree(merge_flow->action_data);
kfree(merge_flow->mask_data);
kfree(merge_flow->unmasked_data);
kfree(merge_flow);
return err;
}
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -454,6 +918,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (port)
port->tc_offload_cnt++;

+ flow_pay->in_hw = true;
+
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -475,6 +941,75 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
return err;
}
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
struct nfp_fl_payload *del_sub_flow,
struct nfp_fl_payload *merge_flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload_link *link, *temp;
struct nfp_fl_payload *origin;
bool mod = false;
int err;
link = list_first_entry(&merge_flow->linked_flows,
struct nfp_fl_payload_link, merge_flow.list);
origin = link->sub_flow.flow;
/* Re-add rule the merge had overwritten if it has not been deleted. */
if (origin != del_sub_flow)
mod = true;
err = nfp_modify_flow_metadata(app, merge_flow);
if (err) {
nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
goto err_free_links;
}
if (!mod) {
err = nfp_flower_xmit_flow(app, merge_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err) {
nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
goto err_free_links;
}
} else {
__nfp_modify_flow_metadata(priv, origin);
err = nfp_flower_xmit_flow(app, origin,
NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
if (err)
nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
origin->in_hw = true;
}
err_free_links:
/* Clean any links connected with the merged flow. */
list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
merge_flow.list)
nfp_flower_unlink_flow(link);
kfree(merge_flow->action_data);
kfree(merge_flow->mask_data);
kfree(merge_flow->unmasked_data);
WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
&merge_flow->fl_node,
nfp_flower_table_params));
kfree_rcu(merge_flow, rcu);
}
static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
struct nfp_fl_payload_link *link, *temp;
/* Remove any merge flow formed from the deleted sub_flow. */
list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
sub_flow.list)
nfp_flower_remove_merge_flow(app, sub_flow,
link->merge_flow.flow);
}

/**
* nfp_flower_del_offload() - Removes a flow from hardware.
* @app: Pointer to the APP handle
@@ -482,7 +1017,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
* @flow: TC flower classifier offload structure
*
* Removes a flow from the repeated hash structure and clears the
- * action payload.
+ * action payload. Any flows merged from this are also deleted.
*
* Return: negative value on error, 0 if removed successfully.
*/
@@ -504,17 +1039,22 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,

err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
- goto err_free_flow;
+ goto err_free_merge_flow;

if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

+ if (!nfp_flow->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
+
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
- if (err)
- goto err_free_flow;
+ /* Fall through on error. */

- err_free_flow:
+ err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, nfp_flow);
if (port)
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
@@ -527,6 +1067,52 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
return err;
}
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *merge_flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload_link *link;
struct nfp_fl_payload *sub_flow;
u64 pkts, bytes, used;
u32 ctx_id;
ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
pkts = priv->stats[ctx_id].pkts;
/* Do not cycle subflows if no stats to distribute. */
if (!pkts)
return;
bytes = priv->stats[ctx_id].bytes;
used = priv->stats[ctx_id].used;
/* Reset stats for the merge flow. */
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
/* The merge flow has received stats updates from firmware.
* Distribute these stats to all subflows that form the merge.
* The stats will then be collected by TC via the subflows.
*/
list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
sub_flow = link->sub_flow.flow;
ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
priv->stats[ctx_id].pkts += pkts;
priv->stats[ctx_id].bytes += bytes;
priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used);
}
}
static void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
struct nfp_fl_payload_link *link;
/* Get merge flows that the subflow forms to distribute their stats. */
list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
@@ -553,6 +1139,10 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

spin_lock_bh(&priv->stats_lock);
+ /* If request is for a sub_flow, update stats from merged flows. */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(app, nfp_flow);
+
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
@@ -682,7 +1272,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
int err;

- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;

switch (f->command) {
......
@@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
for (i = 0; i < count; i++) {
ipv4_addr = payload->tun_info[i].ipv4;
port = be32_to_cpu(payload->tun_info[i].egress_port);
- netdev = nfp_app_repr_get(app, port);
+ netdev = nfp_app_dev_get(app, port, NULL);
if (!netdev)
continue;
@@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
+ u32 port_id;

- /* Only offload representor IPv4s for now. */
- if (!nfp_netdev_is_nfp_repr(netdev))
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
return;

memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.src_ipv4 = flow->saddr;
ether_addr_copy(payload.src_addr, netdev->dev_addr);
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ payload.port_id = cpu_to_be32(port_id);

/* Add destination of new route to NFP cache. */
nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
@@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)

payload = nfp_flower_cmsg_get_data(skb);

- netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
goto route_fail_warning;
......
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
* @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
- * @repr_get: get representor netdev
+ * @dev_get: get representor or internal port representing netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
- struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+ struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+ bool *redir_egress);
};

/**
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
app->type->sriov_disable(app);
}

- static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+ static inline
+ struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+ bool *redir_egress)
{
- if (unlikely(!app || !app->type->repr_get))
+ if (unlikely(!app || !app->type->dev_get))
return NULL;

- return app->type->repr_get(app, id);
+ return app->type->dev_get(app, id, redir_egress);
}

struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
......
@@ -1683,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ bool redir_egress = false;
struct net_device *netdev;
dma_addr_t new_dma_addr;
u32 meta_len_xdp = 0;
@@ -1818,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net *nn;

nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
continue;
}

- nfp_repr_inc_rx_stats(netdev, pkt_len);
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
}

skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1859,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);

- napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
}

if (xdp_prog) {
......