Commit 0463d4ae authored by Patrick McHardy, committed by David S. Miller

[NET_SCHED]: Eliminate qdisc_tree_lock

Since we're now holding the rtnl during the entire dump operation, we
can remove qdisc_tree_lock, whose only purpose is to protect dump
callbacks from concurrent changes to the qdisc tree.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ffa4d721
...@@ -13,8 +13,6 @@ struct qdisc_walker ...@@ -13,8 +13,6 @@ struct qdisc_walker
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
}; };
extern rwlock_t qdisc_tree_lock;
#define QDISC_ALIGNTO 32 #define QDISC_ALIGNTO 32
#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1)) #define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
......
...@@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len; return skb->len;
read_lock(&qdisc_tree_lock);
if (!tcm->tcm_parent) if (!tcm->tcm_parent)
q = dev->qdisc_sleeping; q = dev->qdisc_sleeping;
else else
...@@ -457,7 +456,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -457,7 +456,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cl) if (cl)
cops->put(q, cl); cops->put(q, cl);
out: out:
read_unlock(&qdisc_tree_lock);
dev_put(dev); dev_put(dev);
return skb->len; return skb->len;
} }
......
...@@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops) ...@@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
(root qdisc, all its children, children of children etc.) (root qdisc, all its children, children of children etc.)
*/ */
static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle) struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{ {
struct Qdisc *q; struct Qdisc *q;
...@@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle) ...@@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
return NULL; return NULL;
} }
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
read_lock(&qdisc_tree_lock);
q = __qdisc_lookup(dev, handle);
read_unlock(&qdisc_tree_lock);
return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{ {
unsigned long cl; unsigned long cl;
...@@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) ...@@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
if (n == 0) if (n == 0)
return; return;
while ((parentid = sch->parent)) { while ((parentid = sch->parent)) {
sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
cops = sch->ops->cl_ops; cops = sch->ops->cl_ops;
if (cops->qlen_notify) { if (cops->qlen_notify) {
cl = cops->get(sch, parentid); cl = cops->get(sch, parentid);
...@@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
continue; continue;
if (idx > s_idx) if (idx > s_idx)
s_q_idx = 0; s_q_idx = 0;
read_lock(&qdisc_tree_lock);
q_idx = 0; q_idx = 0;
list_for_each_entry(q, &dev->qdisc_list, list) { list_for_each_entry(q, &dev->qdisc_list, list) {
if (q_idx < s_q_idx) { if (q_idx < s_q_idx) {
...@@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
continue; continue;
} }
if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) { cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
read_unlock(&qdisc_tree_lock);
goto done; goto done;
}
q_idx++; q_idx++;
} }
read_unlock(&qdisc_tree_lock);
} }
done: done:
...@@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0]; s_t = cb->args[0];
t = 0; t = 0;
read_lock(&qdisc_tree_lock);
list_for_each_entry(q, &dev->qdisc_list, list) { list_for_each_entry(q, &dev->qdisc_list, list) {
if (t < s_t || !q->ops->cl_ops || if (t < s_t || !q->ops->cl_ops ||
(tcm->tcm_parent && (tcm->tcm_parent &&
...@@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
break; break;
t++; t++;
} }
read_unlock(&qdisc_tree_lock);
cb->args[0] = t; cb->args[0] = t;
......
...@@ -36,34 +36,23 @@ ...@@ -36,34 +36,23 @@
/* Main transmission queue. */ /* Main transmission queue. */
/* Main qdisc structure lock. /* Modifications to data participating in scheduling must be protected with
* dev->queue_lock spinlock.
However, modifications *
to data, participating in scheduling must be additionally * The idea is the following:
protected with dev->queue_lock spinlock. * - enqueue, dequeue are serialized via top level device
* spinlock dev->queue_lock.
The idea is the following: * - updates to tree and tree walking are only done under the rtnl mutex.
- enqueue, dequeue are serialized via top level device
spinlock dev->queue_lock.
- tree walking is protected by read_lock(qdisc_tree_lock)
and this lock is used only in process context.
- updates to tree are made only under rtnl semaphore,
hence this lock may be made without local bh disabling.
qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
*/ */
DEFINE_RWLOCK(qdisc_tree_lock);
void qdisc_lock_tree(struct net_device *dev) void qdisc_lock_tree(struct net_device *dev)
{ {
write_lock(&qdisc_tree_lock);
spin_lock_bh(&dev->queue_lock); spin_lock_bh(&dev->queue_lock);
} }
void qdisc_unlock_tree(struct net_device *dev) void qdisc_unlock_tree(struct net_device *dev)
{ {
spin_unlock_bh(&dev->queue_lock); spin_unlock_bh(&dev->queue_lock);
write_unlock(&qdisc_tree_lock);
} }
/* /*
...@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev) ...@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev)
printk(KERN_INFO "%s: activation failed\n", dev->name); printk(KERN_INFO "%s: activation failed\n", dev->name);
return; return;
} }
write_lock(&qdisc_tree_lock);
list_add_tail(&qdisc->list, &dev->qdisc_list); list_add_tail(&qdisc->list, &dev->qdisc_list);
write_unlock(&qdisc_tree_lock);
} else { } else {
qdisc = &noqueue_qdisc; qdisc = &noqueue_qdisc;
} }
write_lock(&qdisc_tree_lock);
dev->qdisc_sleeping = qdisc; dev->qdisc_sleeping = qdisc;
write_unlock(&qdisc_tree_lock);
} }
if (!netif_carrier_ok(dev)) if (!netif_carrier_ok(dev))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment