Commit 9aac3c18 authored by David S. Miller

Merge branch 'offloading-tc-rules-hw'

Hadar Hen Zion says:

====================
Offloading tc rules using the underlying Hardware device

This series adds flower classifier support for offloading tc rules when the
Software ingress device is different from the Hardware ingress device, such
as when dealing with IP tunnels.

The first two patches are small fixes to flower: check that the skip_hw
flag wasn't set before calling the Hardware offloading functions, which
try to offload the rule.
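Condensed from the flower hunks further down, the added guard amounts to:

    /* only program the rule into Hardware when skip_hw was not requested */
    if (!tc_skip_hw(fnew->flags)) {
        err = fl_hw_replace_filter(tp, &head->dissector, &mask.key, fnew);
        if (err)
            goto errout;
    }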

The next two patches are infrastructure patches, preparing for the fourth
patch, which adds support in flower for offloading rules when the ingress
device is not a Hardware device and therefore can't offload them itself.
In this case ndo_setup_tc is called with the mirred (egress) device.
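The resulting flow in fl_hw_replace_filter() (condensed from the flower hunk
further down) is roughly:

    /* the ingress device (e.g. a vxlan netdev) can't offload by itself:
     * resolve the mirred egress device from the rule's actions and
     * program the rule through that device instead
     */
    if (!tc_can_offload(dev, tp)) {
        if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev))
            return tc_skip_sw(f->flags) ? -EINVAL : 0;
        dev = f->hw_dev;
        tc->egress_dev = true;
    } else {
        f->hw_dev = dev;
    }

    err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);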

The last three patches add mlx5e support for offloading rules using the new
"egress_dev" flag.

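On the mlx5e side, the vport representor's ndo_setup_tc forwards such
requests to the uplink netdev (condensed from the mlx5e_rep_ndo_setup_tc
hunk further down):

    if (tc->egress_dev) {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);

        /* the rule reached us as the mirred (egress) device;
         * let the uplink device program the actual offload
         */
        return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
                                                    proto, tc);
    }
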
Thanks,
Hadar

Changes from v0:
- check if CONFIG_NET_CLS_ACT is defined before calling tc_action_ops get_dev()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 25429d7b ebe06875
@@ -3811,7 +3811,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
rep.vport = FDB_UPLINK_VPORT;
rep.priv_data = priv;
rep.netdev = netdev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -208,7 +208,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
return mlx5e_add_sqs_fwd_rules(priv);
@@ -226,7 +227,8 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -287,6 +289,14 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
return -EOPNOTSUPP;
if (tc->egress_dev) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
proto, tc);
}
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -384,6 +394,8 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
};
@@ -553,7 +565,7 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
return -EINVAL;
}
rep->priv_data = netdev_priv(netdev);
rep->netdev = netdev;
err = mlx5e_attach_netdev(esw->dev, netdev);
if (err) {
@@ -575,7 +587,7 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
mlx5e_detach_netdev(esw->dev, netdev);
err_destroy_netdev:
mlx5e_destroy_netdev(esw->dev, rep->priv_data);
mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
return err;
@@ -584,10 +596,9 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
struct net_device *netdev = rep->netdev;
unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
}
@@ -186,7 +186,7 @@ struct mlx5_eswitch_rep {
struct mlx5_eswitch_rep *rep);
u16 vport;
u8 hw_id[ETH_ALEN];
void *priv_data;
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
@@ -318,6 +318,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
@@ -970,7 +970,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
rep->load = __rep->load;
rep->unload = __rep->unload;
rep->vport = __rep->vport;
rep->priv_data = __rep->priv_data;
rep->netdev = __rep->netdev;
ether_addr_copy(rep->hw_id, __rep->hw_id);
INIT_LIST_HEAD(&rep->vport_sqs_list);
@@ -990,3 +990,13 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep->valid = false;
}
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
return rep->netdev;
}
@@ -802,6 +802,7 @@ struct tc_to_netdev {
struct tc_cls_matchall_offload *cls_mall;
struct tc_cls_bpf_offload *cls_bpf;
};
bool egress_dev;
};
/* These structures hold the attributes of xdp state that are being passed
@@ -119,6 +119,8 @@ struct tc_action_ops {
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int, const struct tc_action_ops *);
void (*stats_update)(struct tc_action *, u64, u32, u64);
int (*get_dev)(const struct tc_action *a, struct net *net,
struct net_device **mirred_dev);
};
struct tc_action_net {
@@ -171,6 +171,8 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
struct net_device **hw_dev);
/**
* struct tcf_pkt_info - packet information
@@ -425,16 +427,14 @@ struct tc_cls_u32_offload {
};
};
static inline bool tc_should_offload(const struct net_device *dev,
const struct tcf_proto *tp, u32 flags)
static inline bool tc_can_offload(const struct net_device *dev,
const struct tcf_proto *tp)
{
const struct Qdisc *sch = tp->q;
const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
if (!(dev->features & NETIF_F_HW_TC))
return false;
if (flags & TCA_CLS_FLAGS_SKIP_HW)
return false;
if (!dev->netdev_ops->ndo_setup_tc)
return false;
if (cops && cops->tcf_cl_offload)
@@ -443,6 +443,19 @@ static inline bool tc_should_offload(const struct net_device *dev,
return true;
}
static inline bool tc_skip_hw(u32 flags)
{
return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}
static inline bool tc_should_offload(const struct net_device *dev,
const struct tcf_proto *tp, u32 flags)
{
if (tc_skip_hw(flags))
return false;
return tc_can_offload(dev, tp);
}
static inline bool tc_skip_sw(u32 flags)
{
return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
@@ -315,6 +315,17 @@ static struct notifier_block mirred_device_notifier = {
.notifier_call = mirred_device_event,
};
static int tcf_mirred_device(const struct tc_action *a, struct net *net,
struct net_device **mirred_dev)
{
int ifindex = tcf_mirred_ifindex(a);
*mirred_dev = __dev_get_by_index(net, ifindex);
if (!*mirred_dev)
return -EINVAL;
return 0;
}
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.type = TCA_ACT_MIRRED,
@@ -327,6 +338,7 @@ static struct tc_action_ops act_mirred_ops = {
.walk = tcf_mirred_walker,
.lookup = tcf_mirred_search,
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_device,
};
static __net_init int mirred_init_net(struct net *net)
@@ -682,6 +682,30 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
const struct tc_action *a;
LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (a->ops->get_dev) {
a->ops->get_dev(a, dev_net(dev), hw_dev);
break;
}
}
if (*hw_dev)
return 0;
#endif
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);
static int __init tc_filter_init(void)
{
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
@@ -78,6 +78,8 @@ struct cls_fl_filter {
u32 handle;
u32 flags;
struct rcu_head rcu;
struct tc_to_netdev tc;
struct net_device *hw_dev;
};
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
@@ -201,81 +203,85 @@ static void fl_destroy_filter(struct rcu_head *head)
kfree(f);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
struct net_device *dev = f->hw_dev;
struct tc_to_netdev *tc = &f->tc;
if (!tc_should_offload(dev, tp, 0))
if (!tc_can_offload(dev, tp))
return;
offload.command = TC_CLSFLOWER_DESTROY;
offload.cookie = cookie;
offload.cookie = (unsigned long)f;
tc.type = TC_SETUP_CLSFLOWER;
tc.cls_flower = &offload;
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
struct flow_dissector *dissector,
struct fl_flow_key *mask,
struct fl_flow_key *key,
struct tcf_exts *actions,
unsigned long cookie, u32 flags)
struct cls_fl_filter *f)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
struct tc_to_netdev *tc = &f->tc;
int err;
if (!tc_should_offload(dev, tp, flags))
return tc_skip_sw(flags) ? -EINVAL : 0;
if (!tc_can_offload(dev, tp)) {
if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev))
return tc_skip_sw(f->flags) ? -EINVAL : 0;
dev = f->hw_dev;
tc->egress_dev = true;
} else {
f->hw_dev = dev;
}
offload.command = TC_CLSFLOWER_REPLACE;
offload.cookie = cookie;
offload.cookie = (unsigned long)f;
offload.dissector = dissector;
offload.mask = mask;
offload.key = key;
offload.exts = actions;
offload.key = &f->key;
offload.exts = &f->exts;
tc.type = TC_SETUP_CLSFLOWER;
tc.cls_flower = &offload;
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
&tc);
tc);
if (tc_skip_sw(flags))
if (tc_skip_sw(f->flags))
return err;
return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
struct net_device *dev = f->hw_dev;
struct tc_to_netdev *tc = &f->tc;
if (!tc_should_offload(dev, tp, 0))
if (!tc_can_offload(dev, tp))
return;
offload.command = TC_CLSFLOWER_STATS;
offload.cookie = (unsigned long)f;
offload.exts = &f->exts;
tc.type = TC_SETUP_CLSFLOWER;
tc.cls_flower = &offload;
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
list_del_rcu(&f->list);
fl_hw_destroy_filter(tp, (unsigned long)f);
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f);
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, fl_destroy_filter);
}
@@ -743,20 +749,20 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout;
}
err = fl_hw_replace_filter(tp,
&head->dissector,
&mask.key,
&fnew->key,
&fnew->exts,
(unsigned long)fnew,
fnew->flags);
if (err)
goto errout;
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp,
&head->dissector,
&mask.key,
fnew);
if (err)
goto errout;
}
if (fold) {
rhashtable_remove_fast(&head->ht, &fold->ht_node,
head->ht_params);
fl_hw_destroy_filter(tp, (unsigned long)fold);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold);
}
*arg = (unsigned long) fnew;
@@ -879,7 +885,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
}
fl_hw_update_stats(tp, f);
if (!tc_skip_hw(f->flags))
fl_hw_update_stats(tp, f);
if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,