Commit 13926d19 authored by Baowen Zheng, committed by David S. Miller

flow_offload: add reoffload process to update hw_count

Add a reoffload process to update hw_count when a driver
is inserted or removed.

During the reoffload process, we delete an action if it has the
skip_sw flag set and is not offloaded to any hardware.

When reoffloading actions, we still offload the actions
that are added independent of filters.
Signed-off-by: Baowen Zheng <baowen.zheng@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e8cb5bcf
......@@ -7,6 +7,7 @@
*/
#include <linux/refcount.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
......@@ -254,6 +255,8 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_update_hw_stats(struct tc_action *action);
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
void *cb_priv, bool add);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
struct tcf_chain **handle,
struct netlink_ext_ack *newchain);
......@@ -265,6 +268,14 @@ DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
#endif
int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
#else /* !CONFIG_NET_CLS_ACT */
/* CONFIG_NET_CLS_ACT disabled: reoffloading tc actions is a no-op. */
static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
					  void *cb_priv, bool add)
{
	return 0;
}
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
......
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
......@@ -417,6 +418,8 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
existing_qdiscs_register(cb, cb_priv);
mutex_unlock(&flow_indr_block_lock);
tcf_action_reoffload_cb(cb, cb_priv, true);
return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
......@@ -469,6 +472,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
mutex_unlock(&flow_indr_block_lock);
tcf_action_reoffload_cb(cb, cb_priv, false);
flow_block_indr_notify(&cleanup_list);
kfree(indr_dev);
}
......
......@@ -137,6 +137,19 @@ static void offload_action_hw_count_set(struct tc_action *act,
act->in_hw_count = hw_count;
}
/* Add @hw_count newly reported hardware offloads to the action's count.
 * NOTE(review): plain u32 addition — assumes the total can never wrap
 * (bounded by the number of offloading drivers); confirm.
 */
static void offload_action_hw_count_inc(struct tc_action *act,
u32 hw_count)
{
act->in_hw_count += hw_count;
}
/* Subtract @hw_count hardware offloads from the action's count,
 * saturating at zero so the counter can never underflow.
 */
static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	if (act->in_hw_count > hw_count)
		act->in_hw_count -= hw_count;
	else
		act->in_hw_count = 0;
}
static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
if (is_tcf_pedit(act))
......@@ -183,9 +196,8 @@ static int offload_action_init(struct flow_offload_action *fl_action,
return -EOPNOTSUPP;
}
static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
u32 *hw_count,
struct netlink_ext_ack *extack)
static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
u32 *hw_count)
{
int err;
......@@ -200,9 +212,37 @@ static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
return 0;
}
/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
struct netlink_ext_ack *extack)
/* Issue one TC_SETUP_ACT command through a single driver callback
 * (used by the reoffload path when a driver registers/unregisters).
 * A successful callback counts as exactly one hardware instance.
 */
static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);

	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}
/* Dispatch an action offload command: through the given driver
 * callback when @cb is set, otherwise through the generic path.
 */
static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	if (cb)
		return tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						    cb, cb_priv);

	return tcf_action_offload_cmd_ex(fl_act, hw_count);
}
static int tcf_action_offload_add_ex(struct tc_action *action,
struct netlink_ext_ack *extack,
flow_indr_block_bind_cb_t *cb,
void *cb_priv)
{
bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
......@@ -231,9 +271,10 @@ static int tcf_action_offload_add(struct tc_action *action,
goto fl_err;
}
err = tcf_action_offload_cmd(fl_action, &in_hw_count, extack);
err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
if (!err)
offload_action_hw_count_set(action, in_hw_count);
cb ? offload_action_hw_count_inc(action, in_hw_count) :
offload_action_hw_count_set(action, in_hw_count);
if (skip_sw && !tc_act_in_hw(action))
err = -EINVAL;
......@@ -246,6 +287,13 @@ static int tcf_action_offload_add(struct tc_action *action,
return err;
}
/* offload the tc action after it is inserted.
 * cb == NULL selects the non-callback path (tcf_action_offload_cmd_ex),
 * which sets (rather than increments) the action's in_hw_count.
 */
static int tcf_action_offload_add(struct tc_action *action,
struct netlink_ext_ack *extack)
{
return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}
int tcf_action_update_hw_stats(struct tc_action *action)
{
struct flow_offload_action fl_act = {};
......@@ -258,7 +306,7 @@ int tcf_action_update_hw_stats(struct tc_action *action)
if (err)
return err;
err = tcf_action_offload_cmd(&fl_act, NULL, NULL);
err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
if (!err) {
preempt_disable();
tcf_action_stats_update(action, fl_act.stats.bytes,
......@@ -277,7 +325,9 @@ int tcf_action_update_hw_stats(struct tc_action *action)
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);
static int tcf_action_offload_del(struct tc_action *action)
static int tcf_action_offload_del_ex(struct tc_action *action,
flow_indr_block_bind_cb_t *cb,
void *cb_priv)
{
struct flow_offload_action fl_act = {};
u32 in_hw_count = 0;
......@@ -290,16 +340,25 @@ static int tcf_action_offload_del(struct tc_action *action)
if (err)
return err;
err = tcf_action_offload_cmd(&fl_act, &in_hw_count, NULL);
if (err)
err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
if (err < 0)
return err;
if (action->in_hw_count != in_hw_count)
if (!cb && action->in_hw_count != in_hw_count)
return -EINVAL;
/* do not need to update hw state when deleting action */
if (cb && in_hw_count)
offload_action_hw_count_dec(action, in_hw_count);
return 0;
}
/* Remove an action's hardware offload state.  cb == NULL selects the
 * non-callback path, which also verifies that the driver-reported count
 * matches action->in_hw_count (see tcf_action_offload_del_ex).
 */
static int tcf_action_offload_del(struct tc_action *action)
{
return tcf_action_offload_del_ex(action, NULL, NULL);
}
static void tcf_action_cleanup(struct tc_action *p)
{
tcf_action_offload_del(p);
......@@ -794,6 +853,59 @@ EXPORT_SYMBOL(tcf_idrinfo_destroy);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

/* Action ops ids live in the pernet subsystem list, so there is no way
 * to walk only the action subsystems from there.  We therefore keep our
 * own list of tc action pernet ops ids for reoffload to walk.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);

/* One entry per registered action ops pernet id; list and entries are
 * protected by act_id_mutex.
 */
struct tc_act_pernet_id {
struct list_head list;
unsigned int id;
};
/* Record a pernet ops id on act_pernet_id_list so reoffload can walk
 * this action subsystem.  Returns 0 on success, -EEXIST if the id is
 * already recorded, -ENOMEM on allocation failure.
 */
static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *entry;
	int err = 0;

	mutex_lock(&act_id_mutex);

	/* Reject duplicates before allocating. */
	list_for_each_entry(entry, &act_pernet_id_list, list) {
		if (entry->id == id) {
			err = -EEXIST;
			goto out_unlock;
		}
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto out_unlock;
	}

	entry->id = id;
	list_add_tail(&entry->list, &act_pernet_id_list);

out_unlock:
	mutex_unlock(&act_id_mutex);
	return err;
}
static void tcf_pernet_del_id_list(unsigned int id)
{
struct tc_act_pernet_id *id_ptr;
mutex_lock(&act_id_mutex);
list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
if (id_ptr->id == id) {
list_del(&id_ptr->list);
kfree(id_ptr);
break;
}
}
mutex_unlock(&act_id_mutex);
}
int tcf_register_action(struct tc_action_ops *act,
struct pernet_operations *ops)
......@@ -812,18 +924,31 @@ int tcf_register_action(struct tc_action_ops *act,
if (ret)
return ret;
if (ops->id) {
ret = tcf_pernet_add_id_list(*ops->id);
if (ret)
goto err_id;
}
write_lock(&act_mod_lock);
list_for_each_entry(a, &act_base, head) {
if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
unregister_pernet_subsys(ops);
return -EEXIST;
ret = -EEXIST;
goto err_out;
}
}
list_add_tail(&act->head, &act_base);
write_unlock(&act_mod_lock);
return 0;
err_out:
write_unlock(&act_mod_lock);
if (ops->id)
tcf_pernet_del_id_list(*ops->id);
err_id:
unregister_pernet_subsys(ops);
return ret;
}
EXPORT_SYMBOL(tcf_register_action);
......@@ -842,8 +967,11 @@ int tcf_unregister_action(struct tc_action_ops *act,
}
}
write_unlock(&act_mod_lock);
if (!err)
if (!err) {
unregister_pernet_subsys(ops);
if (ops->id)
tcf_pernet_del_id_list(*ops->id);
}
return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
......@@ -1595,6 +1723,96 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
return 0;
}
/* Send an RTM_DELACTION notification to userspace and release @action.
 * Used by the reoffload path for a skip_sw action that is no longer
 * offloaded to any hardware and therefore cannot be used at all.
 * The only caller (tcf_action_reoffload_cb) holds idrinfo->lock, hence
 * the _unsafe idr release variant.
 * Returns the rtnetlink_send() result on deletion, the release result
 * otherwise, -ENOBUFS/-EINVAL on skb allocation/fill failure.
 */
static int
tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
size_t attr_size = tcf_action_fill_size(action);
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
[0] = action,
};
const struct tc_action_ops *ops = action->ops;
struct sk_buff *skb;
int ret;

skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
GFP_KERNEL);
if (!skb)
return -ENOBUFS;

/* Fill the delete notification before releasing the action, while its
 * attributes are still valid.
 */
if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
kfree_skb(skb);
return -EINVAL;
}

ret = tcf_idr_release_unsafe(action);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
} else {
/* Action is still referenced: do not notify, drop the skb. */
kfree_skb(skb);
}

return ret;
}
/* tcf_action_reoffload_cb - replay action (un)offload for one driver
 * @cb: indirect block driver callback being registered/unregistered
 * @cb_priv: driver private data passed back through @cb
 * @add: true on driver registration (offload actions to it),
 *       false on unregistration (remove its hardware counts)
 *
 * Walks every action of every pernet action subsystem (via
 * act_pernet_id_list) in every netns, skipping filter-bound actions.
 * On removal, a skip_sw action that ends up offloaded nowhere is
 * deleted and userspace is notified.
 * Returns 0, or -EINVAL if @cb is NULL.
 */
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
void *cb_priv, bool add)
{
struct tc_act_pernet_id *id_ptr;
struct tcf_idrinfo *idrinfo;
struct tc_action_net *tn;
struct tc_action *p;
unsigned int act_id;
unsigned long tmp;
unsigned long id;
struct idr *idr;
struct net *net;
int ret;

if (!cb)
return -EINVAL;

/* Keep the netns list and the pernet id list stable for the walk. */
down_read(&net_rwsem);
mutex_lock(&act_id_mutex);

for_each_net(net) {
list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
act_id = id_ptr->id;
tn = net_generic(net, act_id);

if (!tn)
continue;

idrinfo = tn->idrinfo;
if (!idrinfo)
continue;

/* idrinfo->lock also makes tcf_idr_release_unsafe() in
 * tcf_reoffload_del_notify() safe.
 */
mutex_lock(&idrinfo->lock);
idr = &idrinfo->action_idr;
idr_for_each_entry_ul(idr, p, tmp, id) {
/* Skip filter-bound actions; they are reoffloaded with
 * their filters.
 */
if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
continue;

if (add) {
/* Best effort: a per-action failure does not
 * abort the walk.
 */
tcf_action_offload_add_ex(p, NULL, cb,
cb_priv);
continue;
}

/* cb unregister to update hw count */
ret = tcf_action_offload_del_ex(p, cb, cb_priv);
if (ret < 0)
continue;

if (tc_act_skip_sw(p->tcfa_flags) &&
!tc_act_in_hw(p))
tcf_reoffload_del_notify(net, p);
}
mutex_unlock(&idrinfo->lock);
}
}
mutex_unlock(&act_id_mutex);
up_read(&net_rwsem);

return 0;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment