Commit 7a2bb0f0 authored by David S. Miller

Merge branch 'nfp-fixes'

Simon Horman says:

====================
Fixes for nfp pre_tunnel code

Louis Peens says:

The following set of patches fixes up a few bugs in the pre_tun
decap code paths which have been hiding for a while.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a3bc4832 d8ce0275
...@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app, ...@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
goto err_free_ctx_entry; goto err_free_ctx_entry;
} }
/* Do not allocate a mask-id for pre_tun_rules. These flows are used to
 * configure the pre_tun table and are never actually sent to the
* firmware as an add-flow message. This causes the mask-id allocation
* on the firmware to get out of sync if allocated here.
*/
new_mask_id = 0; new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data, if (!nfp_flow->pre_tun_rule.dev &&
!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) { &nfp_flow->meta.flags, &new_mask_id)) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
...@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, ...@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
goto err_remove_mask; goto err_remove_mask;
} }
if (!nfp_check_mask_remove(app, nfp_flow->mask_data, if (!nfp_flow->pre_tun_rule.dev &&
!nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, nfp_flow->meta.mask_len,
NULL, &new_mask_id)) { NULL, &new_mask_id)) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
...@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app, ...@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
return 0; return 0;
err_remove_mask: err_remove_mask:
nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, if (!nfp_flow->pre_tun_rule.dev)
NULL, &new_mask_id); nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
NULL, &new_mask_id);
err_remove_rhash: err_remove_rhash:
WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
&ctx_entry->ht_node, &ctx_entry->ht_node,
...@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app, ...@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
__nfp_modify_flow_metadata(priv, nfp_flow); __nfp_modify_flow_metadata(priv, nfp_flow);
nfp_check_mask_remove(app, nfp_flow->mask_data, if (!nfp_flow->pre_tun_rule.dev)
nfp_flow->meta.mask_len, &nfp_flow->meta.flags, nfp_check_mask_remove(app, nfp_flow->mask_data,
&new_mask_id); nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
&new_mask_id);
/* Update flow payload with mask ids. */ /* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
......
...@@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, ...@@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
return -EOPNOTSUPP;
}
/* Skip fields known to exist. */ /* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci); mask += sizeof(struct nfp_flower_meta_tci);
ext += sizeof(struct nfp_flower_meta_tci); ext += sizeof(struct nfp_flower_meta_tci);
...@@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, ...@@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
mask += sizeof(struct nfp_flower_in_port); mask += sizeof(struct nfp_flower_in_port);
ext += sizeof(struct nfp_flower_in_port); ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address matches pre_tun_dev. */
mac = (struct nfp_flower_mac_mpls *)ext;
if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
return -EOPNOTSUPP;
}
/* Ensure destination MAC address is fully matched. */ /* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask; mac = (struct nfp_flower_mac_mpls *)mask;
if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
...@@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, ...@@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (mac->mpls_lse) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
return -EOPNOTSUPP;
}
mask += sizeof(struct nfp_flower_mac_mpls); mask += sizeof(struct nfp_flower_mac_mpls);
ext += sizeof(struct nfp_flower_mac_mpls); ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 || if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
......
...@@ -16,8 +16,9 @@ ...@@ -16,8 +16,9 @@
#define NFP_FL_MAX_ROUTES 32 #define NFP_FL_MAX_ROUTES 32
#define NFP_TUN_PRE_TUN_RULE_LIMIT 32 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32
#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 #define NFP_TUN_PRE_TUN_RULE_DEL BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 #define NFP_TUN_PRE_TUN_IDX_BIT BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
/** /**
* struct nfp_tun_pre_run_rule - rule matched before decap * struct nfp_tun_pre_run_rule - rule matched before decap
...@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, ...@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
{ {
struct nfp_flower_priv *app_priv = app->priv; struct nfp_flower_priv *app_priv = app->priv;
struct nfp_tun_offloaded_mac *mac_entry; struct nfp_tun_offloaded_mac *mac_entry;
struct nfp_flower_meta_tci *key_meta;
struct nfp_tun_pre_tun_rule payload; struct nfp_tun_pre_tun_rule payload;
struct net_device *internal_dev; struct net_device *internal_dev;
int err; int err;
...@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, ...@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
if (!mac_entry) if (!mac_entry)
return -ENOENT; return -ENOENT;
/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
* set/clear for port_idx.
*/
key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
else
mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
payload.port_idx = cpu_to_be16(mac_entry->index); payload.port_idx = cpu_to_be16(mac_entry->index);
/* Copy mac id and vlan to flow - dev may not exist at delete time. */ /* Copy mac id and vlan to flow - dev may not exist at delete time. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment