Commit 2ce9c93e authored by Manish Chopra, committed by David S. Miller

qede: Ingress tc flower offload (drop action) support.

The main motivation for this patch is to put the driver's
tc offload infrastructure in place.

With these changes, tc can offload the supported flow
profiles (4-tuple, src-ip, dst-ip, L4 port) for the drop
action. The dropped-flows statistic is a single global
counter covering all flows offloaded with the drop action,
exposed in ethtool statistics as "gft_filter_drop".

Examples -

tc qdisc add dev p4p1 ingress
tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp dst_ip 192.168.40.200 action drop
tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto udp src_ip 192.168.40.100 action drop
tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp src_ip 192.168.40.100 dst_ip 192.168.40.200 \
	src_port 453 dst_port 876 action drop
tc filter add dev p4p1 protocol ipv4 parent ffff: flower \
	skip_sw ip_proto tcp dst_port 98 action drop
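
To verify the offload, the global drop counter added by this patch can
be read back through ethtool statistics (the interface name is
illustrative):

ethtool -S p4p1 | grep gft_filter_drop
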
Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 91a56adb
@@ -52,6 +52,9 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 33
#define QEDE_REVISION_VERSION 0
@@ -469,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs);
@@ -535,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+			    struct tc_cls_flower_offload *f);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
......
@@ -1285,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
rc = qede_add_cls_rule(edev, info);
break;
case ETHTOOL_SRXCLSRLDEL:
-		rc = qede_del_cls_rule(edev, info);
+		rc = qede_delete_flow_filter(edev, info->fs.location);
break;
default:
DP_INFO(edev, "Command parameters not supported\n");
......
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
struct qede_arfs_tuple tuple;
u32 flow_id;
-	u16 sw_id;
+	u64 sw_id;
u16 rxq_id;
u16 next_rxq_id;
u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->tuple.stringify(&n->tuple, tuple_buffer);
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
"%s sw_id[0x%x]: %s [vf %u queue %d]\n",
"%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
add_fltr ? "Adding" : "Deleting",
n->sw_id, tuple_buffer, n->vfid, rxq_id);
}
@@ -152,7 +152,10 @@ static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
kfree(fltr->data);
-	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
kfree(fltr);
}
@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
if (fw_rc) {
DP_NOTICE(edev,
"Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
"Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
fw_rc, fltr->flow_id, fltr->sw_id,
ntohs(fltr->tuple.src_port),
ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1348,7 +1351,7 @@ void qede_config_rx_mode(struct net_device *ndev)
}
static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
struct qede_arfs_fltr_node *fltr;
@@ -1959,9 +1962,8 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
return rc;
}
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
-	struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = -EPERM;
@@ -1970,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
goto unlock;
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
-					 fsp->location);
+					 cookie);
if (!fltr)
goto unlock;
@@ -2000,3 +2002,293 @@ int qede_get_arfs_filter_count(struct qede_dev *edev)
__qede_unlock(edev);
return count;
}
static int qede_parse_actions(struct qede_dev *edev,
struct tcf_exts *exts)
{
int rc = -EINVAL, num_act = 0;
const struct tc_action *a;
bool is_drop = false;
LIST_HEAD(actions);
if (!tcf_exts_has_actions(exts)) {
DP_NOTICE(edev, "No tc actions received\n");
return rc;
}
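	/* Walk the actions; the rule is accepted only if it carries
	 * exactly one action and that action is a gact drop.
	 */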
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
num_act++;
if (is_tcf_gact_shot(a))
is_drop = true;
}
if (num_act == 1 && is_drop)
return 0;
return rc;
}
static int
qede_tc_parse_ports(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *t)
{
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key, *mask;
key = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_PORTS,
f->key);
mask = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_PORTS,
f->mask);
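		/* Only exact (fully-masked) L4 port matches can be offloaded */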
if ((key->src && mask->src != U16_MAX) ||
(key->dst && mask->dst != U16_MAX)) {
DP_NOTICE(edev, "Do not support ports masks\n");
return -EINVAL;
}
t->src_port = key->src;
t->dst_port = key->dst;
}
return 0;
}
static int
qede_tc_parse_v6_common(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *t)
{
struct in6_addr zero_addr, addr;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
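	/* zero_addr (all zeroes) and addr (all ones) serve as reference
	 * patterns for the exact-match mask checks below.
	 */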
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *key, *mask;
key = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->key);
mask = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
memcmp(&mask->src, &addr, sizeof(addr))) ||
(memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
memcmp(&mask->dst, &addr, sizeof(addr)))) {
DP_NOTICE(edev,
"Do not support IPv6 address prefix/mask\n");
return -EINVAL;
}
memcpy(&t->src_ipv6, &key->src, sizeof(addr));
memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
}
if (qede_tc_parse_ports(edev, f, t))
return -EINVAL;
return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int
qede_tc_parse_v4_common(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *t)
{
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *key, *mask;
key = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->key);
mask = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
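		/* IPv4 prefix matches are not supported; masks must be exact */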
if ((key->src && mask->src != U32_MAX) ||
(key->dst && mask->dst != U32_MAX)) {
DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
return -EINVAL;
}
t->src_ipv4 = key->src;
t->dst_ipv4 = key->dst;
}
if (qede_tc_parse_ports(edev, f, t))
return -EINVAL;
return qede_set_v4_tuple_to_profile(edev, t);
}
static int
qede_tc_parse_tcp_v6(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
return qede_tc_parse_v6_common(edev, f, tuple);
}
static int
qede_tc_parse_tcp_v4(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
return qede_tc_parse_v4_common(edev, f, tuple);
}
static int
qede_tc_parse_udp_v6(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
return qede_tc_parse_v6_common(edev, f, tuple);
}
static int
qede_tc_parse_udp_v4(struct qede_dev *edev,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
return qede_tc_parse_v4_common(edev, f, tuple);
}
static int
qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
struct tc_cls_flower_offload *f,
struct qede_arfs_tuple *tuple)
{
int rc = -EINVAL;
u8 ip_proto = 0;
memset(tuple, 0, sizeof(*tuple));
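	/* Only control, basic, IPv4/IPv6 address and ports keys are supported */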
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
DP_NOTICE(edev, "Unsupported key set:0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
}
if (proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)) {
DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
return -EPROTONOSUPPORT;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key;
key = skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
f->key);
ip_proto = key->ip_proto;
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
rc = qede_tc_parse_tcp_v4(edev, f, tuple);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
rc = qede_tc_parse_tcp_v6(edev, f, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
rc = qede_tc_parse_udp_v4(edev, f, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
rc = qede_tc_parse_udp_v6(edev, f, tuple);
else
DP_NOTICE(edev, "Invalid tc protocol request\n");
return rc;
}
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct tc_cls_flower_offload *f)
{
struct qede_arfs_fltr_node *n;
int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
__qede_lock(edev);
if (!edev->arfs) {
rc = -EPERM;
goto unlock;
}
/* parse flower attribute and prepare filter */
if (qede_parse_flower_attr(edev, proto, f, &t))
goto unlock;
/* Validate profile mode and number of filters */
if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
goto unlock;
}
/* parse tc actions and get the vf_id */
if (qede_parse_actions(edev, f->exts))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
rc = -EEXIST;
goto unlock;
}
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
rc = -ENOMEM;
goto unlock;
}
min_hlen = qede_flow_get_min_header_size(&t);
n->data = kzalloc(min_hlen, GFP_KERNEL);
if (!n->data) {
kfree(n);
rc = -ENOMEM;
goto unlock;
}
memcpy(&n->tuple, &t, sizeof(n->tuple));
n->buf_len = min_hlen;
n->b_is_drop = true;
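	/* The tc filter cookie doubles as the driver's software filter ID */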
n->sw_id = f->cookie;
n->tuple.build_hdr(&n->tuple, n->data);
rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
if (rc)
goto unlock;
qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
rc = qede_poll_arfs_filter_config(edev, n);
unlock:
__qede_unlock(edev);
return rc;
}
@@ -556,13 +556,67 @@ int qede_setup_tc(struct net_device *ndev, u8 num_tc)
return 0;
}
static int
qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
__be16 proto)
{
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
return qede_add_tc_flower_fltr(edev, proto, f);
case TC_CLSFLOWER_DESTROY:
return qede_delete_flow_filter(edev, f->cookie);
default:
return -EOPNOTSUPP;
}
}
static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct tc_cls_flower_offload *f;
struct qede_dev *edev = cb_priv;
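	/* Offload is limited to chain 0 on an offload-capable netdev */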
if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
f = type_data;
return qede_set_flower(edev, f, f->common.protocol);
default:
return -EOPNOTSUPP;
}
}
static int qede_setup_tc_block(struct qede_dev *edev,
struct tc_block_offload *f)
{
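	/* Only ingress classifier blocks are bound */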
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
qede_setup_tc_block_cb,
edev, edev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct qede_dev *edev = netdev_priv(dev);
struct tc_mqprio_qopt *mqprio;
switch (type) {
case TC_SETUP_BLOCK:
return qede_setup_tc_block(edev, type_data);
case TC_SETUP_QDISC_MQPRIO:
mqprio = type_data;
@@ -727,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev)
/* user-changeable features */
hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		      NETIF_F_TSO | NETIF_F_TSO6;
+		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
hw_features |= NETIF_F_NTUPLE;
......