Commit 3a7d0d07 authored by Vlad Buslov, committed by David S. Miller

net: sched: extend Qdisc with rcu

Currently, Qdisc API functions assume that users have the rtnl lock taken. To
implement an rtnl-unlocked classifier update interface, the Qdisc API must be
extended with functions that do not require the rtnl lock.

Extend the Qdisc structure with an rcu head. Implement a special version of the
put function, qdisc_put_unlocked(), that is called without the rtnl lock taken.
This function only takes the rtnl lock if the Qdisc reference counter has
reached zero, and is intended to be used as an optimization.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 86bd446b
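
Restating the new contract as a minimal caller-side sketch (the two wrapper functions below are hypothetical and only illustrate the intended split; only qdisc_put() and qdisc_put_unlocked() come from this series):

#include <linux/rtnetlink.h>
#include <net/sch_generic.h>

/* Existing control-path callers keep using qdisc_put() and must already
 * hold rtnl.
 */
static void example_put_locked(struct Qdisc *q)
{
	ASSERT_RTNL();
	qdisc_put(q);
}

/* Unlocked callers use qdisc_put_unlocked(); rtnl is acquired inside the
 * helper only when the last reference is dropped and the qdisc really has
 * to be destroyed, so the common case takes no lock at all.
 */
static void example_put_unlocked(struct Qdisc *q)
{
	qdisc_put_unlocked(q);
}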
@@ -85,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
	return rtnl_dereference(dev->ingress_queue);
}

static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
{
	return rcu_dereference(dev->ingress_queue);
}

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);

#ifdef CONFIG_NET_INGRESS
...
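
A hedged sketch of how the new accessor might be used (helper name invented for illustration; it must run inside an RCU read-side critical section, and nq->qdisc_sleeping is the control-path view of the ingress qdisc):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Returns the configured ingress qdisc, or NULL; the pointer is only
 * guaranteed valid until rcu_read_unlock().
 */
static struct Qdisc *example_ingress_qdisc(struct net_device *dev)
{
	struct netdev_queue *nq = dev_ingress_queue_rcu(dev);

	return nq ? nq->qdisc_sleeping : NULL;
}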
@@ -102,6 +102,7 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
...
@@ -105,6 +105,7 @@ struct Qdisc {
	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;
	struct rcu_head		rcu;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -555,6 +556,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
...
@@ -314,6 +314,24 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
...
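
For illustration, a hedged read-side sketch (function name invented, not part of this patch): the lookup and any use of the returned qdisc must stay inside a single RCU read-side critical section, which the deferred free via call_rcu() in the next hunk makes safe.

#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>

/* Check whether a qdisc with the given handle exists, without taking rtnl. */
static bool example_qdisc_exists(struct net_device *dev, u32 handle)
{
	bool found;

	rcu_read_lock();
	found = qdisc_lookup_rcu(dev, handle) != NULL;
	rcu_read_unlock();

	return found;
}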
@@ -941,6 +941,13 @@ void qdisc_free(struct Qdisc *qdisc)
	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
@@ -970,7 +977,7 @@ static void qdisc_destroy(struct Qdisc *qdisc)
		kfree_skb_list(skb);
	}

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_put(struct Qdisc *qdisc)
@@ -983,6 +990,22 @@ void qdisc_put(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 * Intended to be used as optimization, this function only takes rtnl lock if
 * qdisc reference counter reached zero.
 */
void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
...