Commit c8d129c4 authored by Edwin Peer, committed by Jakub Kicinski

bnxt_en: implement fully specified 5-tuple masks

Support subfield masking for IP addresses and ports. Previously, only
entire fields could be included or excluded in NTUPLE filters.
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Link: https://lore.kernel.org/r/20240205223202.25341-5-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7c8036fb
...@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = { ...@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = {
static struct workqueue_struct *bnxt_pf_wq; static struct workqueue_struct *bnxt_pf_wq;
/* IPv6 address mask with every bit set: the whole address is significant. */
#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
/* IPv6 address mask with no bits set: the address is a wildcard. */
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
/* Flow masks that match nothing: all port and address mask bits clear.
 * (Initializing the v6addrs member also zeroes the overlapping v4addrs
 * union member, so this covers IPv4 flows too.)
 */
const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
.ports = {
.src = 0,
.dst = 0,
},
.addrs = {
.v6addrs = {
.src = BNXT_IPV6_MASK_NONE,
.dst = BNXT_IPV6_MASK_NONE,
},
},
};
/* Fully-specified IPv6 5-tuple: every bit of both ports and both
 * IPv6 addresses must match.
 */
const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
.ports = {
.src = cpu_to_be16(0xffff),
.dst = cpu_to_be16(0xffff),
},
.addrs = {
.v6addrs = {
.src = BNXT_IPV6_MASK_ALL,
.dst = BNXT_IPV6_MASK_ALL,
},
},
};
/* Fully-specified IPv4 5-tuple: every bit of both ports and both
 * IPv4 addresses must match.
 */
const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
.ports = {
.src = cpu_to_be16(0xffff),
.dst = cpu_to_be16(0xffff),
},
.addrs = {
.v4addrs = {
.src = cpu_to_be32(0xffffffff),
.dst = cpu_to_be32(0xffffffff),
},
},
};
static bool bnxt_vf_pciid(enum board_idx idx) static bool bnxt_vf_pciid(enum board_idx idx)
{ {
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
...@@ -5690,6 +5733,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, ...@@ -5690,6 +5733,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
{ {
struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req; struct hwrm_cfa_ntuple_filter_alloc_input *req;
struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys; struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr; struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic; struct bnxt_vnic_info *vnic;
...@@ -5722,25 +5766,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, ...@@ -5722,25 +5766,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6); req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type = req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
*(struct in6_addr *)&req->src_ipaddr[0] = *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
keys->addrs.v6addrs.src; *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
bnxt_fill_ipv6_mask(req->src_ipaddr_mask); *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
}
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
*(struct in6_addr *)&req->dst_ipaddr[0] =
keys->addrs.v6addrs.dst;
bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
}
} else { } else {
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { req->src_ipaddr[0] = keys->addrs.v4addrs.src;
req->src_ipaddr[0] = keys->addrs.v4addrs.src; req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
} req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
}
} }
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
...@@ -5748,14 +5782,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, ...@@ -5748,14 +5782,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
} }
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { req->src_port = keys->ports.src;
req->src_port = keys->ports.src; req->src_port_mask = masks->ports.src;
req->src_port_mask = cpu_to_be16(0xffff); req->dst_port = keys->ports.dst;
} req->dst_port_mask = masks->ports.dst;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
req->dst_port = keys->ports.dst;
req->dst_port_mask = cpu_to_be16(0xffff);
}
resp = hwrm_req_hold(bp, req); resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req); rc = hwrm_req_send(bp, req);
...@@ -13956,45 +13986,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, ...@@ -13956,45 +13986,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2) struct bnxt_ntuple_filter *f2)
{ {
struct bnxt_flow_masks *masks1 = &f1->fmasks;
struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys; struct flow_keys *keys2 = &f2->fkeys;
if (f1->ntuple_flags != f2->ntuple_flags)
return false;
if (keys1->basic.n_proto != keys2->basic.n_proto || if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto) keys1->basic.ip_proto != keys2->basic.ip_proto)
return false; return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) { if (keys1->basic.n_proto == htons(ETH_P_IP)) {
if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) || masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)) masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false; return false;
} else { } else {
if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src) ||
&keys2->addrs.v6addrs.src, !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
sizeof(keys1->addrs.v6addrs.src))) || &masks2->addrs.v6addrs.src) ||
((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst) ||
&keys2->addrs.v6addrs.dst, !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
sizeof(keys1->addrs.v6addrs.dst)))) &masks2->addrs.v6addrs.dst))
return false; return false;
} }
if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) && return keys1->ports.src == keys2->ports.src &&
keys1->ports.src != keys2->ports.src) || masks1->ports.src == masks2->ports.src &&
((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) && keys1->ports.dst == keys2->ports.dst &&
keys1->ports.dst != keys2->ports.dst)) masks1->ports.dst == masks2->ports.dst &&
return false; keys1->control.flags == keys2->control.flags &&
f1->l2_fltr == f2->l2_fltr;
if (keys1->control.flags == keys2->control.flags &&
f1->l2_fltr == f2->l2_fltr)
return true;
return false;
} }
struct bnxt_ntuple_filter * struct bnxt_ntuple_filter *
...@@ -14059,10 +14083,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ...@@ -14059,10 +14083,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT; rc = -EPROTONOSUPPORT;
goto err_free; goto err_free;
} }
if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
bp->hwrm_spec_code < 0x10601) { if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
rc = -EPROTONOSUPPORT; if (bp->hwrm_spec_code < 0x10601) {
goto err_free; rc = -EPROTONOSUPPORT;
goto err_free;
}
new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
} }
flags = fkeys->control.flags; flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) && if (((flags & FLOW_DIS_ENCAPSULATION) &&
...@@ -14070,9 +14097,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ...@@ -14070,9 +14097,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT; rc = -EPROTONOSUPPORT;
goto err_free; goto err_free;
} }
new_fltr->l2_fltr = l2_fltr; new_fltr->l2_fltr = l2_fltr;
new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock(); rcu_read_lock();
......
...@@ -1355,19 +1355,20 @@ struct bnxt_filter_base { ...@@ -1355,19 +1355,20 @@ struct bnxt_filter_base {
struct rcu_head rcu; struct rcu_head rcu;
}; };
/* Per-filter match masks for the 5-tuple fields in struct flow_keys.
 * A set bit marks that bit of the corresponding address/port as
 * significant; clear bits are wildcards, allowing subfield matching.
 */
struct bnxt_flow_masks {
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
};

/* Canonical masks defined in bnxt.c: match nothing, or fully match an
 * IPv6/IPv4 5-tuple.
 */
extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
/* An RX ntuple (5-tuple) flow-steering filter: the flow key values in
 * @fkeys plus per-field masks in @fmasks selecting which bits of each
 * field are significant. @fmasks replaces the former all-or-nothing
 * BNXT_NTUPLE_MATCH_* flag word.
 *
 * NOTE(review): reconstructed post-patch version of this struct; the
 * extracted text had the old and new diff columns fused on each line.
 */
struct bnxt_ntuple_filter {
	struct bnxt_filter_base base;
	struct flow_keys fkeys;
	struct bnxt_flow_masks fmasks;	/* masks paired with fkeys */
	struct bnxt_l2_filter *l2_fltr;
	u32 flow_id;
};
......
...@@ -1080,6 +1080,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) ...@@ -1080,6 +1080,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs; (struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base; struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr; struct bnxt_ntuple_filter *fltr;
struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys; struct flow_keys *fkeys;
int rc = -EINVAL; int rc = -EINVAL;
...@@ -1127,6 +1128,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) ...@@ -1127,6 +1128,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys; fkeys = &fltr->fkeys;
fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) { if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
if (fkeys->basic.ip_proto == IPPROTO_TCP) if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V4_FLOW; fs->flow_type = TCP_V4_FLOW;
...@@ -1135,22 +1137,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) ...@@ -1135,22 +1137,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
else else
goto fltr_err; goto fltr_err;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
} fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
} fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
}
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
}
} else { } else {
if (fkeys->basic.ip_proto == IPPROTO_TCP) if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V6_FLOW; fs->flow_type = TCP_V6_FLOW;
...@@ -1159,24 +1153,18 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) ...@@ -1159,24 +1153,18 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
else else
goto fltr_err; goto fltr_err;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = fkeys->addrs.v6addrs.src;
fkeys->addrs.v6addrs.src; *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src); fmasks->addrs.v6addrs.src;
} *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { fkeys->addrs.v6addrs.dst;
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
fkeys->addrs.v6addrs.dst; fmasks->addrs.v6addrs.dst;
bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst); fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
} fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
}
if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
} }
fs->ring_cookie = fltr->base.rxq; fs->ring_cookie = fltr->base.rxq;
...@@ -1240,19 +1228,6 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp, ...@@ -1240,19 +1228,6 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp,
return rc; return rc;
} }
/* ethtool masks selecting every bit of an IPv4 address / L4 port. */
#define IPV4_ALL_MASK ((__force __be32)~0)
#define L4_PORT_ALL_MASK ((__force __be16)~0)
/* Return true when all 128 bits of the IPv6 mask are set, i.e. the
 * whole address is significant.
 */
static bool ipv6_mask_is_full(__be32 mask[4])
{
	__be32 all_bits = mask[0];

	all_bits &= mask[1];
	all_bits &= mask[2];
	all_bits &= mask[3];
	return all_bits == IPV4_ALL_MASK;
}
/* Return true when no bit of the IPv6 mask is set, i.e. the address is
 * a full wildcard.
 */
static bool ipv6_mask_is_zero(__be32 mask[4])
{
	__be32 any_bits = mask[0] | mask[1];

	any_bits |= mask[2] | mask[3];
	return !any_bits;
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs) struct ethtool_rx_flow_spec *fs)
{ {
...@@ -1260,6 +1235,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, ...@@ -1260,6 +1235,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct bnxt_ntuple_filter *new_fltr, *fltr; struct bnxt_ntuple_filter *new_fltr, *fltr;
struct bnxt_l2_filter *l2_fltr; struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type; u32 flow_type = fs->flow_type;
struct flow_keys *fkeys; struct flow_keys *fkeys;
u32 idx; u32 idx;
...@@ -1278,6 +1254,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, ...@@ -1278,6 +1254,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
l2_fltr = bp->vnic_info[0].l2_filters[0]; l2_fltr = bp->vnic_info[0].l2_filters[0];
atomic_inc(&l2_fltr->refcnt); atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr; new_fltr->l2_fltr = l2_fltr;
fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys; fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
...@@ -1291,32 +1268,14 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, ...@@ -1291,32 +1268,14 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW) if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP; fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP); fkeys->basic.n_proto = htons(ETH_P_IP);
fkeys->addrs.v4addrs.src = ip_spec->ip4src;
if (ip_mask->ip4src == IPV4_ALL_MASK) { fmasks->addrs.v4addrs.src = ip_mask->ip4src;
fkeys->addrs.v4addrs.src = ip_spec->ip4src; fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
} else if (ip_mask->ip4src) { fkeys->ports.src = ip_spec->psrc;
goto ntuple_err; fmasks->ports.src = ip_mask->psrc;
} fkeys->ports.dst = ip_spec->pdst;
if (ip_mask->ip4dst == IPV4_ALL_MASK) { fmasks->ports.dst = ip_mask->pdst;
fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
} else if (ip_mask->ip4dst) {
goto ntuple_err;
}
if (ip_mask->psrc == L4_PORT_ALL_MASK) {
fkeys->ports.src = ip_spec->psrc;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
} else if (ip_mask->psrc) {
goto ntuple_err;
}
if (ip_mask->pdst == L4_PORT_ALL_MASK) {
fkeys->ports.dst = ip_spec->pdst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
} else if (ip_mask->pdst) {
goto ntuple_err;
}
break; break;
} }
case TCP_V6_FLOW: case TCP_V6_FLOW:
...@@ -1329,40 +1288,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, ...@@ -1329,40 +1288,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP; fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6); fkeys->basic.n_proto = htons(ETH_P_IPV6);
if (ipv6_mask_is_full(ip_mask->ip6src)) { fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
fkeys->addrs.v6addrs.src = fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
*(struct in6_addr *)&ip_spec->ip6src; fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) { fkeys->ports.src = ip_spec->psrc;
goto ntuple_err; fmasks->ports.src = ip_mask->psrc;
} fkeys->ports.dst = ip_spec->pdst;
if (ipv6_mask_is_full(ip_mask->ip6dst)) { fmasks->ports.dst = ip_mask->pdst;
fkeys->addrs.v6addrs.dst =
*(struct in6_addr *)&ip_spec->ip6dst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
goto ntuple_err;
}
if (ip_mask->psrc == L4_PORT_ALL_MASK) {
fkeys->ports.src = ip_spec->psrc;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
} else if (ip_mask->psrc) {
goto ntuple_err;
}
if (ip_mask->pdst == L4_PORT_ALL_MASK) {
fkeys->ports.dst = ip_spec->pdst;
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
} else if (ip_mask->pdst) {
goto ntuple_err;
}
break; break;
} }
default: default:
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
goto ntuple_err; goto ntuple_err;
} }
if (!new_fltr->ntuple_flags) if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err; goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL); idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment