Commit 8685255e authored by David S. Miller

Merge branch 'sch_act_lockless'

Eric Dumazet says:

====================
net_sched: act: lockless operation

As mentioned by Alexei last week in Budapest, it is a bit weird
to take a spinlock in order to drop a packet in a tc filter...

Let's add percpu infra for tc actions and use it for gact & mirred.

Before these changes, my host with 8 RX queues was handling 5 Mpps with gact,
and more than 11 Mpps after them.

The mirred change is not yet visible when ifb+qdisc is used, as ifb is
not yet multi-queue enabled, but it is a step forward.
====================
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0a6d4245 2ee22a90
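
For context on where the speedup comes from: before this series, every packet handled by gact or mirred took the per-action spinlock on the fast path just to bump shared bstats/qstats counters and lastuse, so all CPUs contended on the same lock and cache lines. With per-CPU counters each CPU only touches its own slot and the lock disappears from the hot path; the totals are folded together only when stats are dumped. A minimal userspace sketch of that pattern follows; it is not the kernel code, and the names pcpu_stats, stats_update and stats_fold are made up for illustration.

/*
 * Per-CPU statistics sketch: the fast path increments a CPU-private slot
 * with no shared lock; a reader folds all slots when dumping stats.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

struct pcpu_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t drops;
};

static struct pcpu_stats stats[NR_CPUS];

/* Fast path: touch only this CPU's slot, so no spinlock is needed. */
static void stats_update(int cpu, uint32_t pkt_len, int dropped)
{
	stats[cpu].packets++;
	stats[cpu].bytes += pkt_len;
	if (dropped)
		stats[cpu].drops++;
}

/* Slow path (stats dump): fold every per-CPU slot into one total. */
static struct pcpu_stats stats_fold(void)
{
	struct pcpu_stats total = { 0, 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		total.packets += stats[cpu].packets;
		total.bytes   += stats[cpu].bytes;
		total.drops   += stats[cpu].drops;
	}
	return total;
}

int main(void)
{
	struct pcpu_stats t;

	stats_update(0, 1500, 0);
	stats_update(1, 60, 1);

	t = stats_fold();
	printf("packets=%llu bytes=%llu drops=%llu\n",
	       (unsigned long long)t.packets,
	       (unsigned long long)t.bytes,
	       (unsigned long long)t.drops);
	return 0;
}

In the kernel, the per-CPU slots additionally carry a u64_stats_sync sequence counter (see bstats_cpu_update() below) so 64-bit counters can be read consistently on 32-bit hosts.
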
......@@ -21,6 +21,8 @@ struct tcf_common {
struct gnet_stats_rate_est64 tcfc_rate_est;
spinlock_t tcfc_lock;
struct rcu_head tcfc_rcu;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
};
#define tcf_head common.tcfc_head
#define tcf_index common.tcfc_index
......@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
kfree(hf->htab);
}
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
static inline void tcf_lastuse_update(struct tcf_t *tm)
{
unsigned long now = jiffies;
if (tm->lastuse != now)
tm->lastuse = now;
}
#ifdef CONFIG_NET_CLS_ACT
#define ACT_P_CREATED 1
......@@ -103,7 +116,7 @@ int tcf_hash_release(struct tc_action *a, int bind);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
int tcf_hash_check(u32 index, struct tc_action *a, int bind);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
int size, int bind);
int size, int bind, bool cpustats);
void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_hash_insert(struct tc_action *a);
......
......@@ -513,17 +513,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}
static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
const struct sk_buff *skb)
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
const struct sk_buff *skb)
{
struct gnet_stats_basic_cpu *bstats =
this_cpu_ptr(sch->cpu_bstats);
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
static inline void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
......@@ -547,16 +550,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
sch->qstats.drops += count;
}
static inline void qdisc_qstats_drop(struct Qdisc *sch)
static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
sch->qstats.drops++;
qstats->drops++;
}
static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
qstats->overlimits++;
}
qstats->drops++;
static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
qstats_drop_inc(&sch->qstats);
}
static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
......
......@@ -6,9 +6,10 @@
struct tcf_gact {
struct tcf_common common;
#ifdef CONFIG_GACT_PROB
u16 tcfg_ptype;
u16 tcfg_pval;
int tcfg_paction;
u16 tcfg_ptype;
u16 tcfg_pval;
int tcfg_paction;
atomic_t packets;
#endif
};
#define to_gact(a) \
......
......@@ -8,7 +8,7 @@ struct tcf_mirred {
int tcfm_eaction;
int tcfm_ifindex;
int tcfm_ok_push;
struct net_device *tcfm_dev;
struct net_device __rcu *tcfm_dev;
struct list_head tcfm_list;
};
#define to_mirred(a) \
......
......@@ -3646,7 +3646,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
qdisc_skb_cb(skb)->pkt_len = skb->len;
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
qdisc_bstats_update_cpu(cl->q, skb);
qdisc_bstats_cpu_update(cl->q, skb);
switch (tc_classify(skb, cl, &cl_res)) {
case TC_ACT_OK:
......@@ -3654,7 +3654,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
skb->tc_index = TC_H_MIN(cl_res.classid);
break;
case TC_ACT_SHOT:
qdisc_qstats_drop_cpu(cl->q);
qdisc_qstats_cpu_drop(cl->q);
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
kfree_skb(skb);
......
......@@ -27,6 +27,15 @@
#include <net/act_api.h>
#include <net/netlink.h>
static void free_tcf(struct rcu_head *head)
{
struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
free_percpu(p->cpu_bstats);
free_percpu(p->cpu_qstats);
kfree(p);
}
void tcf_hash_destroy(struct tc_action *a)
{
struct tcf_common *p = a->priv;
......@@ -41,7 +50,7 @@ void tcf_hash_destroy(struct tc_action *a)
* gen_estimator est_timer() might access p->tcfc_lock
* or bstats, wait a RCU grace period before freeing p
*/
kfree_rcu(p, tcfc_rcu);
call_rcu(&p->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_destroy);
......@@ -230,15 +239,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
if (est)
gen_kill_estimator(&pc->tcfc_bstats,
&pc->tcfc_rate_est);
kfree_rcu(pc, tcfc_rcu);
call_rcu(&pc->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
int size, int bind)
int size, int bind, bool cpustats)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
int err = -ENOMEM;
if (unlikely(!p))
return -ENOMEM;
......@@ -246,18 +256,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
if (bind)
p->tcfc_bindcnt = 1;
if (cpustats) {
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
if (!p->cpu_bstats) {
err1:
kfree(p);
return err;
}
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats) {
err2:
free_percpu(p->cpu_bstats);
goto err1;
}
}
spin_lock_init(&p->tcfc_lock);
INIT_HLIST_NODE(&p->tcfc_head);
p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est) {
int err = gen_new_estimator(&p->tcfc_bstats, NULL,
&p->tcfc_rate_est,
&p->tcfc_lock, est);
err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
&p->tcfc_rate_est,
&p->tcfc_lock, est);
if (err) {
kfree(p);
return err;
free_percpu(p->cpu_qstats);
goto err2;
}
}
......@@ -615,10 +639,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
gnet_stats_copy_queue(&d, NULL,
gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfc_qstats,
p->tcfc_qstats.qlen) < 0)
goto errout;
......
......@@ -281,7 +281,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, act, bind)) {
ret = tcf_hash_create(parm->index, est, act,
sizeof(*prog), bind);
sizeof(*prog), bind, false);
if (ret < 0)
goto destroy_fp;
......
......@@ -108,7 +108,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
bind, false);
if (ret)
return ret;
......
......@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_CSUM_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
......
......@@ -28,14 +28,18 @@
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
{
if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
if (prandom_u32() % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
static int gact_determ(struct tcf_gact *gact)
{
if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
u32 pack = atomic_inc_return(&gact->packets);
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
if (pack % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
......@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
......@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
gact = to_gact(a);
spin_lock_bh(&gact->tcf_lock);
ASSERT_RTNL();
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
if (p_parm) {
gact->tcfg_paction = p_parm->paction;
gact->tcfg_pval = p_parm->pval;
gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
/* Make sure tcfg_pval is written before tcfg_ptype
* coupled with smp_rmb() in gact_net_rand() & gact_determ()
*/
smp_wmb();
gact->tcfg_ptype = p_parm->ptype;
}
#endif
spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(a);
return ret;
......@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_gact *gact = a->priv;
int action = TC_ACT_SHOT;
int action = READ_ONCE(gact->tcf_action);
spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
if (gact->tcfg_ptype)
action = gact_rand[gact->tcfg_ptype](gact);
else
action = gact->tcf_action;
#else
action = gact->tcf_action;
{
u32 ptype = READ_ONCE(gact->tcfg_ptype);
if (ptype)
action = gact_rand[ptype](gact);
}
#endif
gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
gact->tcf_bstats.packets++;
bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
if (action == TC_ACT_SHOT)
gact->tcf_qstats.drops++;
gact->tcf_tm.lastuse = jiffies;
spin_unlock(&gact->tcf_lock);
qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
tcf_lastuse_update(&gact->tcf_tm);
return action;
}
......
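
The gact hunks above rely on ordered publication rather than the old spinlock: tcf_gact_init() writes tcfg_pval (forced to at least 1 by max_t) before tcfg_ptype, separated by smp_wmb(), and the lockless readers issue smp_rmb() after seeing a non-zero ptype and before reading pval, so they can never divide by zero. Below is a rough userspace analogue of that write/read pairing, using C11 release/acquire in place of the kernel barriers; the field and function names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t pval;          /* payload: probability divisor          */
static atomic_ushort ptype;    /* publication flag: 0 = not configured  */

/* Writer, modeled on tcf_gact_init(): publish pval before ptype. */
static void publish(uint16_t new_pval, uint16_t new_ptype)
{
	pval = new_pval ? new_pval : 1;   /* never publish 0, like max_t(u16, 1, pval) */
	/* release store pairs with the acquire load in use_action() */
	atomic_store_explicit(&ptype, new_ptype, memory_order_release);
}

/* Reader, modeled on gact_net_rand(): only divide once ptype is seen. */
static int use_action(uint32_t rnd, int pass_action, int prob_action)
{
	if (!atomic_load_explicit(&ptype, memory_order_acquire))
		return pass_action;           /* no probability configured */
	/* the acquire above guarantees pval is already valid and non-zero */
	return (rnd % pval) ? pass_action : prob_action;
}

int main(void)
{
	publish(10, 1);
	printf("action=%d\n", use_action(7, 0, 2));
	return 0;
}
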
......@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
index = nla_get_u32(tb[TCA_IPT_INDEX]);
if (!tcf_hash_check(index, a, bind) ) {
ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
......
......@@ -35,9 +35,11 @@ static LIST_HEAD(mirred_list);
static void tcf_mirred_release(struct tc_action *a, int bind)
{
struct tcf_mirred *m = to_mirred(a);
struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
list_del(&m->tcfm_list);
if (m->tcfm_dev)
dev_put(m->tcfm_dev);
if (dev)
dev_put(dev);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
......@@ -93,7 +95,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (dev == NULL)
return -EINVAL;
ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
......@@ -105,18 +108,18 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
m = to_mirred(a);
spin_lock_bh(&m->tcf_lock);
ASSERT_RTNL();
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
if (dev != NULL) {
m->tcfm_ifindex = parm->ifindex;
if (ret != ACT_P_CREATED)
dev_put(m->tcfm_dev);
dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
dev_hold(dev);
m->tcfm_dev = dev;
rcu_assign_pointer(m->tcfm_dev, dev);
m->tcfm_ok_push = ok_push;
}
spin_unlock_bh(&m->tcf_lock);
if (ret == ACT_P_CREATED) {
list_add(&m->tcfm_list, &mirred_list);
tcf_hash_insert(a);
......@@ -131,20 +134,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
struct tcf_mirred *m = a->priv;
struct net_device *dev;
struct sk_buff *skb2;
int retval, err;
u32 at;
int retval, err = 1;
spin_lock(&m->tcf_lock);
m->tcf_tm.lastuse = jiffies;
bstats_update(&m->tcf_bstats, skb);
tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
dev = m->tcfm_dev;
if (!dev) {
printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
rcu_read_lock();
retval = READ_ONCE(m->tcf_action);
dev = rcu_dereference(m->tcfm_dev);
if (unlikely(!dev)) {
pr_notice_once("tc mirred: target device is gone\n");
goto out;
}
if (!(dev->flags & IFF_UP)) {
if (unlikely(!(dev->flags & IFF_UP))) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
......@@ -152,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
at = G_TC_AT(skb->tc_verd);
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 == NULL)
if (!skb2)
goto out;
if (!(at & AT_EGRESS)) {
......@@ -168,16 +173,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
skb2->dev = dev;
err = dev_queue_xmit(skb2);
out:
if (err) {
m->tcf_qstats.overlimits++;
out:
qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
retval = TC_ACT_SHOT;
else
retval = m->tcf_action;
} else
retval = m->tcf_action;
spin_unlock(&m->tcf_lock);
}
rcu_read_unlock();
return retval;
}
......@@ -216,14 +218,16 @@ static int mirred_device_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct tcf_mirred *m;
ASSERT_RTNL();
if (event == NETDEV_UNREGISTER)
list_for_each_entry(m, &mirred_list, tcfm_list) {
spin_lock_bh(&m->tcf_lock);
if (m->tcfm_dev == dev) {
if (rcu_access_pointer(m->tcfm_dev) == dev) {
dev_put(dev);
m->tcfm_dev = NULL;
/* Note : no rcu grace period necessary, as
* net_device are already rcu protected.
*/
RCU_INIT_POINTER(m->tcfm_dev, NULL);
}
spin_unlock_bh(&m->tcf_lock);
}
return NOTIFY_DONE;
......
......@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_NAT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
......
......@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (!parm->nkeys)
return -EINVAL;
ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
bind, false);
if (ret)
return ret;
p = to_pedit(a);
......
......@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
defdata = nla_data(tb[TCA_DEF_DATA]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
bind, false);
if (ret)
return ret;
......
......@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
bind, false);
if (ret)
return ret;
......
......@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
action = parm->v_action;
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
bind, false);
if (ret)
return ret;
......