Commit e37268eb authored by David S. Miller

Merge branch 'add-flow_rule-infrastructure'

Pablo Neira Ayuso says:

====================
add flow_rule infrastructure

This patchset, as is, allows us to reuse the driver codebase to
configure ACL hardware offloads from both the ethtool_rxnfc and the TC
flower interfaces. A few clients of this infrastructure, such as the
bcm_sf2 and qede drivers, are presented for reference. Moreover, all of
the existing drivers in the tree are converted to use this
infrastructure.

This patchset reuses the existing flow dissector infrastructure that
was introduced by Jiri Pirko et al., so the number of abstractions it
adds is minimal: just a few wrapper structures for the selector side of
the rules. And, in order to express actions, this patchset exposes an
action API that is based on the existing TC action infrastructure and
on what existing drivers already support on that front.

v7: This patchset is a rebase on top of the net-next tree, after
    addressing questions and feedback from driver developers in the
    last batch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d9b5a675 37c5d3ef
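
For readers new to this API, the following is a minimal, hypothetical sketch (not part of this series; the foo_parse_flower() name and the surrounding glue are made up purely for illustration) of how a driver is expected to consume the selectors and actions carried by a struct flow_rule, using the helpers this infrastructure introduces:

	/* Hypothetical example only: walk the selectors and actions of a
	 * struct flow_rule as built by cls_flower with this series applied.
	 */
	#include <linux/errno.h>
	#include <linux/in.h>
	#include <net/pkt_cls.h>
	#include <net/flow_offload.h>

	static int foo_parse_flower(struct tc_cls_flower_offload *f)
	{
		struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
		struct flow_action_entry *act;
		int i;

		/* Selectors reuse the flow dissector keys via flow_match_* wrappers */
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			flow_rule_match_basic(rule, &match);
			if (match.mask->ip_proto && match.key->ip_proto != IPPROTO_TCP)
				return -EOPNOTSUPP; /* this sketch only handles TCP */
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			struct flow_match_ipv4_addrs match;

			flow_rule_match_ipv4_addrs(rule, &match);
			/* program match.key->src/dst, qualified by match.mask->src/dst */
		}

		/* Actions arrive already translated from the TC action infrastructure */
		flow_action_for_each(i, act, &rule->action) {
			switch (act->id) {
			case FLOW_ACTION_DROP:
				/* program a drop entry in hardware */
				break;
			case FLOW_ACTION_QUEUE:
				/* steer to the RX queue described by the action */
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		return 0;
	}

The same selector representation is shared with ethtool: ethtool_rx_flow_rule_create() builds an equivalent struct flow_rule from a struct ethtool_rx_flow_spec, which is what the bcm_sf2 conversion below relies on.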
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <net/dsa.h>
 #include <linux/bitmap.h>
+#include <net/flow_offload.h>
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -257,7 +258,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
 }
 static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
-struct ethtool_tcpip4_spec *v4_spec,
+struct flow_dissector_key_ipv4_addrs *addrs,
+struct flow_dissector_key_ports *ports,
 unsigned int slice_num,
 bool mask)
 {
@@ -278,7 +280,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 * UDF_n_A6 [23:8]
 * UDF_n_A5 [7:0]
 */
-reg = be16_to_cpu(v4_spec->pdst) >> 8;
+reg = be16_to_cpu(ports->dst) >> 8;
 if (mask)
 offset = CORE_CFP_MASK_PORT(3);
 else
@@ -289,9 +291,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 * UDF_n_A4 [23:8]
 * UDF_n_A3 [7:0]
 */
-reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
-(u32)be16_to_cpu(v4_spec->psrc) << 8 |
-(be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+(u32)be16_to_cpu(ports->src) << 8 |
+(be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
 if (mask)
 offset = CORE_CFP_MASK_PORT(2);
 else
@@ -302,9 +304,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 * UDF_n_A2 [23:8]
 * UDF_n_A1 [7:0]
 */
-reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
-(u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
-(be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+(u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+(be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
 if (mask)
 offset = CORE_CFP_MASK_PORT(1);
 else
@@ -317,8 +319,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 * Slice ID [3:2]
 * Slice valid [1:0]
 */
-reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
-(u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+(u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
 SLICE_NUM(slice_num) | SLICE_VALID;
 if (mask)
 offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +334,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 unsigned int queue_num,
 struct ethtool_rx_flow_spec *fs)
 {
-struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+struct ethtool_rx_flow_spec_input input = {};
 const struct cfp_udf_layout *layout;
 unsigned int slice_num, rule_index;
+struct ethtool_rx_flow_rule *flow;
+struct flow_match_ipv4_addrs ipv4;
+struct flow_match_ports ports;
+struct flow_match_ip ip;
 u8 ip_proto, ip_frag;
 u8 num_udf;
 u32 reg;
@@ -343,13 +349,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 switch (fs->flow_type & ~FLOW_EXT) {
 case TCP_V4_FLOW:
 ip_proto = IPPROTO_TCP;
-v4_spec = &fs->h_u.tcp_ip4_spec;
-v4_m_spec = &fs->m_u.tcp_ip4_spec;
 break;
 case UDP_V4_FLOW:
 ip_proto = IPPROTO_UDP;
-v4_spec = &fs->h_u.udp_ip4_spec;
-v4_m_spec = &fs->m_u.udp_ip4_spec;
 break;
 default:
 return -EINVAL;
@@ -367,11 +369,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 if (rule_index > bcm_sf2_cfp_rule_size(priv))
 return -ENOSPC;
+input.fs = fs;
+flow = ethtool_rx_flow_rule_create(&input);
+if (IS_ERR(flow))
+return PTR_ERR(flow);
+flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+flow_rule_match_ports(flow->rule, &ports);
+flow_rule_match_ip(flow->rule, &ip);
 layout = &udf_tcpip4_layout;
 /* We only use one UDF slice for now */
 slice_num = bcm_sf2_get_slice_number(layout, 0);
-if (slice_num == UDF_NUM_SLICES)
-return -EINVAL;
+if (slice_num == UDF_NUM_SLICES) {
+ret = -EINVAL;
+goto out_err_flow_rule;
+}
 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -398,7 +411,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 * Reserved [1]
 * UDF_Valid[8] [0]
 */
-core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
+core_writel(priv, ip.key->tos << IPTOS_SHIFT |
 ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
 udf_upper_bits(num_udf),
 CORE_CFP_DATA_PORT(6));
@@ -417,8 +430,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 /* Program the match and the mask */
-bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
-bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
+bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
 /* Insert into TCAM now */
 bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -426,14 +439,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 if (ret) {
 pr_err("TCAM entry at addr %d failed\n", rule_index);
-return ret;
+goto out_err_flow_rule;
 }
 /* Insert into Action and policer RAMs now */
 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
 queue_num, true);
 if (ret)
-return ret;
+goto out_err_flow_rule;
 /* Turn on CFP for this rule now */
 reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +459,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 fs->location = rule_index;
 return 0;
+out_err_flow_rule:
+ethtool_rx_flow_rule_destroy(flow);
+return ret;
 }
 static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -582,8 +599,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 struct ethtool_rx_flow_spec *fs)
 {
 struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+struct ethtool_rx_flow_spec_input input = {};
 unsigned int slice_num, rule_index[2];
 const struct cfp_udf_layout *layout;
+struct ethtool_rx_flow_rule *flow;
+struct flow_match_ipv6_addrs ipv6;
+struct flow_match_ports ports;
 u8 ip_proto, ip_frag;
 int ret = 0;
 u8 num_udf;
@@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 goto out_err;
 }
+input.fs = fs;
+flow = ethtool_rx_flow_rule_create(&input);
+if (IS_ERR(flow)) {
+ret = PTR_ERR(flow);
+goto out_err;
+}
+flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+flow_rule_match_ports(flow->rule, &ports);
 /* Apply the UDF layout for this filter */
 bcm_sf2_cfp_udf_set(priv, layout, slice_num);
@@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 /* Slice the IPv6 source address and port */
-bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
-slice_num, false);
-bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-SLICE_NUM_MASK, true);
+bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+ports.key->src, slice_num, false);
+bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+ports.mask->src, SLICE_NUM_MASK, true);
 /* Insert into TCAM now because we need to insert a second rule */
 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,20 +729,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 if (ret) {
 pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
-goto out_err;
+goto out_err_flow_rule;
 }
 /* Insert into Action and policer RAMs now */
 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
 queue_num, false);
 if (ret)
-goto out_err;
+goto out_err_flow_rule;
 /* Now deal with the second slice to chain this rule */
 slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
 if (slice_num == UDF_NUM_SLICES) {
 ret = -EINVAL;
-goto out_err;
+goto out_err_flow_rule;
 }
 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 /* Mask all */
 core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
-bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
-false);
-bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
-SLICE_NUM_MASK, true);
+bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+ports.key->dst, slice_num, false);
+bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+ports.key->dst, SLICE_NUM_MASK, true);
 /* Insert into TCAM now */
 bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -759,7 +789,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 if (ret) {
 pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
-goto out_err;
+goto out_err_flow_rule;
 }
 /* Insert into Action and policer RAMs now, set chain ID to
@@ -768,7 +798,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
 queue_num, true);
 if (ret)
-goto out_err;
+goto out_err_flow_rule;
 /* Turn on CFP for this rule now */
 reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 return ret;
+out_err_flow_rule:
+ethtool_rx_flow_rule_destroy(flow);
 out_err:
 clear_bit(rule_index[1], priv->cfp.used);
 return ret;
......
@@ -7169,11 +7169,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
 struct tc_cls_flower_offload *f,
 struct i40e_cloud_filter *filter)
 {
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+struct flow_dissector *dissector = rule->match.dissector;
 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
 struct i40e_pf *pf = vsi->back;
 u8 field_flags = 0;
-if (f->dissector->used_keys &
+if (dissector->used_keys &
 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 BIT(FLOW_DISSECTOR_KEY_BASIC) |
 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -7183,143 +7185,109 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
 BIT(FLOW_DISSECTOR_KEY_PORTS) |
 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
-f->dissector->used_keys);
+dissector->used_keys);
 return -EOPNOTSUPP;
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-struct flow_dissector_key_keyid *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->key);
-struct flow_dissector_key_keyid *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->mask);
-if (mask->keyid != 0)
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+struct flow_match_enc_keyid match;
+flow_rule_match_enc_keyid(rule, &match);
+if (match.mask->keyid != 0)
 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
-filter->tenant_id = be32_to_cpu(key->keyid);
+filter->tenant_id = be32_to_cpu(match.key->keyid);
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-struct flow_dissector_key_basic *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-f->key);
-struct flow_dissector_key_basic *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-f->mask);
-n_proto_key = ntohs(key->n_proto);
-n_proto_mask = ntohs(mask->n_proto);
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+struct flow_match_basic match;
+flow_rule_match_basic(rule, &match);
+n_proto_key = ntohs(match.key->n_proto);
+n_proto_mask = ntohs(match.mask->n_proto);
 if (n_proto_key == ETH_P_ALL) {
 n_proto_key = 0;
 n_proto_mask = 0;
 }
 filter->n_proto = n_proto_key & n_proto_mask;
-filter->ip_proto = key->ip_proto;
+filter->ip_proto = match.key->ip_proto;
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-struct flow_dissector_key_eth_addrs *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ETH_ADDRS,
-f->key);
-struct flow_dissector_key_eth_addrs *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ETH_ADDRS,
-f->mask);
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+struct flow_match_eth_addrs match;
+flow_rule_match_eth_addrs(rule, &match);
 /* use is_broadcast and is_zero to check for all 0xf or 0 */
-if (!is_zero_ether_addr(mask->dst)) {
-if (is_broadcast_ether_addr(mask->dst)) {
+if (!is_zero_ether_addr(match.mask->dst)) {
+if (is_broadcast_ether_addr(match.mask->dst)) {
 field_flags |= I40E_CLOUD_FIELD_OMAC;
 } else {
 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
-mask->dst);
+match.mask->dst);
 return I40E_ERR_CONFIG;
 }
 }
-if (!is_zero_ether_addr(mask->src)) {
-if (is_broadcast_ether_addr(mask->src)) {
+if (!is_zero_ether_addr(match.mask->src)) {
+if (is_broadcast_ether_addr(match.mask->src)) {
 field_flags |= I40E_CLOUD_FIELD_IMAC;
 } else {
 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
-mask->src);
+match.mask->src);
 return I40E_ERR_CONFIG;
 }
 }
-ether_addr_copy(filter->dst_mac, key->dst);
-ether_addr_copy(filter->src_mac, key->src);
+ether_addr_copy(filter->dst_mac, match.key->dst);
+ether_addr_copy(filter->src_mac, match.key->src);
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-struct flow_dissector_key_vlan *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_VLAN,
-f->key);
-struct flow_dissector_key_vlan *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_VLAN,
-f->mask);
-if (mask->vlan_id) {
-if (mask->vlan_id == VLAN_VID_MASK) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+struct flow_match_vlan match;
+flow_rule_match_vlan(rule, &match);
+if (match.mask->vlan_id) {
+if (match.mask->vlan_id == VLAN_VID_MASK) {
 field_flags |= I40E_CLOUD_FIELD_IVLAN;
 } else {
 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
-mask->vlan_id);
+match.mask->vlan_id);
 return I40E_ERR_CONFIG;
 }
 }
-filter->vlan_id = cpu_to_be16(key->vlan_id);
+filter->vlan_id = cpu_to_be16(match.key->vlan_id);
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-struct flow_dissector_key_control *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_CONTROL,
-f->key);
-addr_type = key->addr_type;
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+struct flow_match_control match;
+flow_rule_match_control(rule, &match);
+addr_type = match.key->addr_type;
 }
 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-struct flow_dissector_key_ipv4_addrs *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-f->key);
-struct flow_dissector_key_ipv4_addrs *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-f->mask);
-if (mask->dst) {
-if (mask->dst == cpu_to_be32(0xffffffff)) {
+struct flow_match_ipv4_addrs match;
+flow_rule_match_ipv4_addrs(rule, &match);
+if (match.mask->dst) {
+if (match.mask->dst == cpu_to_be32(0xffffffff)) {
 field_flags |= I40E_CLOUD_FIELD_IIP;
 } else {
 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
-&mask->dst);
+&match.mask->dst);
 return I40E_ERR_CONFIG;
 }
 }
-if (mask->src) {
-if (mask->src == cpu_to_be32(0xffffffff)) {
+if (match.mask->src) {
+if (match.mask->src == cpu_to_be32(0xffffffff)) {
 field_flags |= I40E_CLOUD_FIELD_IIP;
 } else {
 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
-&mask->src);
+&match.mask->src);
 return I40E_ERR_CONFIG;
 }
 }
@@ -7328,70 +7296,60 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
 return I40E_ERR_CONFIG;
 }
-filter->dst_ipv4 = key->dst;
-filter->src_ipv4 = key->src;
+filter->dst_ipv4 = match.key->dst;
+filter->src_ipv4 = match.key->src;
 }
 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-struct flow_dissector_key_ipv6_addrs *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-f->key);
-struct flow_dissector_key_ipv6_addrs *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-f->mask);
+struct flow_match_ipv6_addrs match;
+flow_rule_match_ipv6_addrs(rule, &match);
 /* src and dest IPV6 address should not be LOOPBACK
 * (0:0:0:0:0:0:0:1), which can be represented as ::1
 */
-if (ipv6_addr_loopback(&key->dst) ||
-ipv6_addr_loopback(&key->src)) {
+if (ipv6_addr_loopback(&match.key->dst) ||
+ipv6_addr_loopback(&match.key->src)) {
 dev_err(&pf->pdev->dev,
 "Bad ipv6, addr is LOOPBACK\n");
 return I40E_ERR_CONFIG;
 }
-if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+if (!ipv6_addr_any(&match.mask->dst) ||
+!ipv6_addr_any(&match.mask->src))
 field_flags |= I40E_CLOUD_FIELD_IIP;
-memcpy(&filter->src_ipv6, &key->src.s6_addr32,
+memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
 sizeof(filter->src_ipv6));
-memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
+memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
 sizeof(filter->dst_ipv6));
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-struct flow_dissector_key_ports *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_PORTS,
-f->key);
-struct flow_dissector_key_ports *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_PORTS,
-f->mask);
-if (mask->src) {
-if (mask->src == cpu_to_be16(0xffff)) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+struct flow_match_ports match;
+flow_rule_match_ports(rule, &match);
+if (match.mask->src) {
+if (match.mask->src == cpu_to_be16(0xffff)) {
 field_flags |= I40E_CLOUD_FIELD_IIP;
 } else {
 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
-be16_to_cpu(mask->src));
+be16_to_cpu(match.mask->src));
 return I40E_ERR_CONFIG;
 }
 }
-if (mask->dst) {
-if (mask->dst == cpu_to_be16(0xffff)) {
+if (match.mask->dst) {
+if (match.mask->dst == cpu_to_be16(0xffff)) {
 field_flags |= I40E_CLOUD_FIELD_IIP;
 } else {
 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
-be16_to_cpu(mask->dst));
+be16_to_cpu(match.mask->dst));
 return I40E_ERR_CONFIG;
 }
 }
-filter->dst_port = key->dst;
-filter->src_port = key->src;
+filter->dst_port = match.key->dst;
+filter->src_port = match.key->src;
 switch (filter->ip_proto) {
 case IPPROTO_TCP:
......
@@ -2581,9 +2581,11 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
 int traffic_class,
 struct igb_nfc_filter *input)
 {
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+struct flow_dissector *dissector = rule->match.dissector;
 struct netlink_ext_ack *extack = f->common.extack;
-if (f->dissector->used_keys &
+if (dissector->used_keys &
 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2593,78 +2595,60 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
 return -EOPNOTSUPP;
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-struct flow_dissector_key_eth_addrs *key, *mask;
-key = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ETH_ADDRS,
-f->key);
-mask = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ETH_ADDRS,
-f->mask);
-if (!is_zero_ether_addr(mask->dst)) {
-if (!is_broadcast_ether_addr(mask->dst)) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+struct flow_match_eth_addrs match;
+flow_rule_match_eth_addrs(rule, &match);
+if (!is_zero_ether_addr(match.mask->dst)) {
+if (!is_broadcast_ether_addr(match.mask->dst)) {
 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
 return -EINVAL;
 }
 input->filter.match_flags |=
 IGB_FILTER_FLAG_DST_MAC_ADDR;
-ether_addr_copy(input->filter.dst_addr, key->dst);
+ether_addr_copy(input->filter.dst_addr, match.key->dst);
 }
-if (!is_zero_ether_addr(mask->src)) {
-if (!is_broadcast_ether_addr(mask->src)) {
+if (!is_zero_ether_addr(match.mask->src)) {
+if (!is_broadcast_ether_addr(match.mask->src)) {
 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
 return -EINVAL;
 }
 input->filter.match_flags |=
 IGB_FILTER_FLAG_SRC_MAC_ADDR;
-ether_addr_copy(input->filter.src_addr, key->src);
+ether_addr_copy(input->filter.src_addr, match.key->src);
 }
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-struct flow_dissector_key_basic *key, *mask;
-key = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-f->key);
-mask = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-f->mask);
-if (mask->n_proto) {
-if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+struct flow_match_basic match;
+flow_rule_match_basic(rule, &match);
+if (match.mask->n_proto) {
+if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
 return -EINVAL;
 }
 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
-input->filter.etype = key->n_proto;
+input->filter.etype = match.key->n_proto;
 }
 }
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-struct flow_dissector_key_vlan *key, *mask;
-key = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_VLAN,
-f->key);
-mask = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_VLAN,
-f->mask);
-if (mask->vlan_priority) {
-if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+struct flow_match_vlan match;
+flow_rule_match_vlan(rule, &match);
+if (match.mask->vlan_priority) {
+if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
 return -EINVAL;
 }
 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
-input->filter.vlan_tci = key->vlan_priority;
+input->filter.vlan_tci = match.key->vlan_priority;
 }
 }
......
@@ -496,25 +496,21 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
 void *headers_c,
 void *headers_v)
 {
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
 struct netlink_ext_ack *extack = f->common.extack;
-struct flow_dissector_key_ports *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_PORTS,
-f->key);
-struct flow_dissector_key_ports *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_PORTS,
-f->mask);
 void *misc_c = MLX5_ADDR_OF(fte_match_param,
 spec->match_criteria,
 misc_parameters);
 void *misc_v = MLX5_ADDR_OF(fte_match_param,
 spec->match_value,
 misc_parameters);
+struct flow_match_ports enc_ports;
+flow_rule_match_enc_ports(rule, &enc_ports);
 /* Full udp dst port must be given */
-if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
-memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
+if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
 NL_SET_ERR_MSG_MOD(extack,
 "VXLAN decap filter must include enc_dst_port condition");
 netdev_warn(priv->netdev,
@@ -523,12 +519,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
 }
 /* udp dst port must be knonwn as a VXLAN port */
-if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
+if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
 NL_SET_ERR_MSG_MOD(extack,
 "Matched UDP port is not registered as a VXLAN port");
 netdev_warn(priv->netdev,
 "UDP port %d is not registered as a VXLAN port\n",
-be16_to_cpu(key->dst));
+be16_to_cpu(enc_ports.key->dst));
 return -EOPNOTSUPP;
 }
@@ -536,26 +532,26 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
-MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
-MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
+MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
+ntohs(enc_ports.mask->dst));
+MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ntohs(enc_ports.key->dst));
-MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
-MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
+MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ntohs(enc_ports.mask->src));
+MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ntohs(enc_ports.key->src));
 /* match on VNI */
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-struct flow_dissector_key_keyid *key =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->key);
-struct flow_dissector_key_keyid *mask =
-skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->mask);
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+struct flow_match_enc_keyid enc_keyid;
+flow_rule_match_enc_keyid(rule, &enc_keyid);
 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
-be32_to_cpu(mask->keyid));
+be32_to_cpu(enc_keyid.mask->keyid));
 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
-be32_to_cpu(key->keyid));
+be32_to_cpu(enc_keyid.key->keyid));
 }
 return 0;
 }
@@ -570,6 +566,7 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
 misc_parameters);
 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 misc_parameters);
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
 if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
 NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -587,21 +584,14 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
 /* gre key */
-if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-struct flow_dissector_key_keyid *mask = NULL;
-struct flow_dissector_key_keyid *key = NULL;
-mask = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->mask);
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+struct flow_match_enc_keyid enc_keyid;
+flow_rule_match_enc_keyid(rule, &enc_keyid);
 MLX5_SET(fte_match_set_misc, misc_c,
-gre_key.key, be32_to_cpu(mask->keyid));
-key = skb_flow_dissector_target(f->dissector,
-FLOW_DISSECTOR_KEY_ENC_KEYID,
-f->key);
+gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
 MLX5_SET(fte_match_set_misc, misc_v,
-gre_key.key, be32_to_cpu(key->keyid));
+gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
 }
 return 0;
......
@@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
 {
 u8 ethertype;
-if (action == TCA_VLAN_ACT_MODIFY) {
+if (action == FLOW_ACTION_VLAN_MANGLE) {
 switch (proto) {
 case ETH_P_8021Q:
 ethertype = 0;
......
@@ -102,23 +102,22 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
 static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
 {
-return dissector_uses_key(f->dissector,
-FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
-dissector_uses_key(f->dissector,
-FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
-dissector_uses_key(f->dissector,
-FLOW_DISSECTOR_KEY_PORTS) ||
-dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 }
 static int
-nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
 u32 *key_layer_two, int *key_size)
 {
-if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
 return -EOPNOTSUPP;
-if (enc_opts->len > 0) {
+if (enc_opts->key->len > 0) {
 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
 *key_size += sizeof(struct nfp_flower_geneve_options);
 }
@@ -133,20 +132,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 struct tc_cls_flower_offload *flow,
 enum nfp_flower_tun_type *tun_type)
 {
-struct flow_dissector_key_basic *mask_basic = NULL;
-struct flow_dissector_key_basic *key_basic = NULL;
+struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct flow_dissector *dissector = rule->match.dissector;
+struct flow_match_basic basic = { NULL, NULL};
 struct nfp_flower_priv *priv = app->priv;
 u32 key_layer_two;
 u8 key_layer;
 int key_size;
 int err;
-if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
 return -EOPNOTSUPP;
 /* If any tun dissector is used then the required set must be used. */
-if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
-(flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
 != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
 return -EOPNOTSUPP;
@@ -155,76 +155,53 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 key_size = sizeof(struct nfp_flower_meta_tci) +
 sizeof(struct nfp_flower_in_port);
-if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
-dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
 key_layer |= NFP_FLOWER_LAYER_MAC;
 key_size += sizeof(struct nfp_flower_mac_mpls);
 }
-if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-struct flow_dissector_key_vlan *flow_vlan;
-flow_vlan = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_VLAN,
-flow->mask);
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+struct flow_match_vlan vlan;
+flow_rule_match_vlan(rule, &vlan);
 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
-flow_vlan->vlan_priority)
+vlan.key->vlan_priority)
 return -EOPNOTSUPP;
 }
-if (dissector_uses_key(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
-struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
-struct flow_dissector_key_ports *mask_enc_ports = NULL;
-struct flow_dissector_key_enc_opts *enc_op = NULL;
-struct flow_dissector_key_ports *enc_ports = NULL;
-struct flow_dissector_key_control *mask_enc_ctl =
-skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_CONTROL,
-flow->mask);
-struct flow_dissector_key_control *enc_ctl =
-skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_CONTROL,
-flow->key);
-if (mask_enc_ctl->addr_type != 0xffff ||
-enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+struct flow_match_enc_opts enc_op = { NULL, NULL };
+struct flow_match_ipv4_addrs ipv4_addrs;
+struct flow_match_control enc_ctl;
+struct flow_match_ports enc_ports;
+flow_rule_match_enc_control(rule, &enc_ctl);
+if (enc_ctl.mask->addr_type != 0xffff ||
+enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
 return -EOPNOTSUPP;
 /* These fields are already verified as used. */
-mask_ipv4 =
-skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
-flow->mask);
-if (mask_ipv4->dst != cpu_to_be32(~0))
+flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
 return -EOPNOTSUPP;
-mask_enc_ports =
-skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_PORTS,
-flow->mask);
-enc_ports =
-skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_PORTS,
-flow->key);
-if (mask_enc_ports->dst != cpu_to_be16(~0))
+flow_rule_match_enc_ports(rule, &enc_ports);
+if (enc_ports.mask->dst != cpu_to_be16(~0))
 return -EOPNOTSUPP;
-if (dissector_uses_key(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_OPTS)) {
-enc_op = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_ENC_OPTS,
-flow->key);
-}
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+flow_rule_match_enc_opts(rule, &enc_op);
-switch (enc_ports->dst) {
+switch (enc_ports.key->dst) {
 case htons(NFP_FL_VXLAN_PORT):
 *tun_type = NFP_FL_TUNNEL_VXLAN;
 key_layer |= NFP_FLOWER_LAYER_VXLAN;
 key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-if (enc_op)
+if (enc_op.key)
 return -EOPNOTSUPP;
 break;
 case htons(NFP_FL_GENEVE_PORT):
@@ -236,11 +213,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
 key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-if (!enc_op)
+if (!enc_op.key)
 break;
 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
 return -EOPNOTSUPP;
-err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
 &key_size);
 if (err)
 return err;
@@ -254,19 +231,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 return -EOPNOTSUPP;
 }
-if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-mask_basic = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-flow->mask);
-key_basic = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_BASIC,
-flow->key);
-}
-if (mask_basic && mask_basic->n_proto) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
+flow_rule_match_basic(rule, &basic);
+if (basic.mask && basic.mask->n_proto) {
 /* Ethernet type is present in the key. */
-switch (key_basic->n_proto) {
+switch (basic.key->n_proto) {
 case cpu_to_be16(ETH_P_IP):
 key_layer |= NFP_FLOWER_LAYER_IPV4;
 key_size += sizeof(struct nfp_flower_ipv4);
@@ -305,9 +275,9 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 }
-if (mask_basic && mask_basic->ip_proto) {
+if (basic.mask && basic.mask->ip_proto) {
 /* Ethernet type is present in the key. */
-switch (key_basic->ip_proto) {
+switch (basic.key->ip_proto) {
 case IPPROTO_TCP:
 case IPPROTO_UDP:
 case IPPROTO_SCTP:
@@ -324,14 +294,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 }
-if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
-struct flow_dissector_key_tcp *tcp;
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+struct flow_match_tcp tcp;
 u32 tcp_flags;
-tcp = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_TCP,
-flow->key);
-tcp_flags = be16_to_cpu(tcp->flags);
+flow_rule_match_tcp(rule, &tcp);
+tcp_flags = be16_to_cpu(tcp.key->flags);
 if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
 return -EOPNOTSUPP;
@@ -347,12 +315,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 * space, thus we need to ensure we include a IPv4/IPv6 key
 * layer if we have not done so already.
 */
-if (!key_basic)
+if (!basic.key)
 return -EOPNOTSUPP;
 if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
 !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
-switch (key_basic->n_proto) {
+switch (basic.key->n_proto) {
 case cpu_to_be16(ETH_P_IP):
 key_layer |= NFP_FLOWER_LAYER_IPV4;
 key_size += sizeof(struct nfp_flower_ipv4);
@@ -369,14 +337,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 }
-if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-struct flow_dissector_key_control *key_ctl;
-key_ctl = skb_flow_dissector_target(flow->dissector,
-FLOW_DISSECTOR_KEY_CONTROL,
-flow->key);
-if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+struct flow_match_control ctl;
+flow_rule_match_control(rule, &ctl);
+if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
 return -EOPNOTSUPP;
 }
@@ -589,9 +554,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 spin_lock_bh(&priv->stats_lock);
-tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
-priv->stats[ctx_id].pkts,
-priv->stats[ctx_id].used);
+flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
+priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
 priv->stats[ctx_id].pkts = 0;
 priv->stats[ctx_id].bytes = 0;
......
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
 sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
-fib_notifier.o xdp.o
+fib_notifier.o xdp.o flow_offload.o
 obj-y += net-sysfs.o
 obj-$(CONFIG_PAGE_POOL) += page_pool.o
......