Commit 290aa0ad authored by Vlad Buslov, committed by David S. Miller

net: sched: don't disable bh when accessing action idr

The initial net_device implementation used the ingress_lock spinlock to
synchronize the ingress path of a device. This lock was used in both process
and bh context. In some code paths the action map lock was obtained while
holding ingress_lock. Commit e1e992e5 ("[NET_SCHED] protect action
config/dump from irqs") modified actions to always disable bh while taking
the action map lock, in order to prevent a deadlock on ingress_lock in
softirq context. That lock has since been removed from net_device, so
disabling bh while accessing the action map is no longer necessary.

Replace all action idr spinlock usage with regular calls that do not
disable bh.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 73bf1fc5
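For context, here is a minimal sketch of the locking rationale described in
the commit message. It is not part of the patch, and the lock and function
names are hypothetical: while a softirq-context user of the lock exists,
process context must use the _bh variants; once the softirq user is gone,
plain spin_lock() suffices.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(map_lock);

/* Hypothetical softirq-context user, standing in for the old
 * ingress path that took the action map lock under ingress_lock
 * in bh context. */
static void map_user_in_softirq(void)
{
	spin_lock(&map_lock);
	/* ... access the shared action map ... */
	spin_unlock(&map_lock);
}

/* While the softirq user exists, process context must disable bh:
 * if a softirq preempted us while we hold map_lock and then tried
 * to take it on the same CPU, it would spin forever (deadlock). */
static void map_user_in_process_ctx_old(void)
{
	spin_lock_bh(&map_lock);
	/* ... access the shared action map ... */
	spin_unlock_bh(&map_lock);
}

/* With no bh-context user left, plain spin_lock() is sufficient
 * and avoids the cost of disabling and re-enabling softirqs. */
static void map_user_in_process_ctx_new(void)
{
	spin_lock(&map_lock);
	/* ... access the shared action map ... */
	spin_unlock(&map_lock);
}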
@@ -77,9 +77,9 @@ static void free_tcf(struct tc_action *p)
 
 static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 {
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_remove(&idrinfo->action_idr, p->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
 }
@@ -156,7 +156,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	struct tc_action *p;
 	unsigned long id = 1;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 
 	s_i = cb->args[0];
 
@@ -191,7 +191,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	if (index >= 0)
 		cb->args[0] = index + 1;
 
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	if (n_i) {
 		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
 			cb->args[1] = n_i;
@@ -261,9 +261,9 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
 {
 	struct tc_action *p = NULL;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	p = idr_find(&idrinfo->action_idr, index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 
 	return p;
 }
@@ -323,7 +323,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	}
 	spin_lock_init(&p->tcfa_lock);
 	idr_preload(GFP_KERNEL);
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	/* user doesn't specify an index */
 	if (!index) {
 		index = 1;
@@ -331,7 +331,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	} else {
 		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
 	}
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	idr_preload_end();
 	if (err)
 		goto err3;
@@ -369,9 +369,9 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 {
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
 
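Note that even after dropping the _bh variants, tcf_idr_create() keeps the
idr_preload(GFP_KERNEL)/GFP_ATOMIC pairing seen in the hunks above: sleeping
is still forbidden while the spinlock is held, so memory is preallocated
before taking the lock and the allocation under it stays atomic. A minimal
sketch of that pattern follows; the lock and helper names are hypothetical,
not from the patch.

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

/* Hypothetical helper mirroring the tcf_idr_create() allocation
 * pattern: preallocate with GFP_KERNEL outside the lock, then do
 * the actual insertion atomically under the (non-bh) spinlock. */
static int example_alloc_exact(void *entry, u32 index)
{
	int err;

	idr_preload(GFP_KERNEL);	/* may sleep; fills per-cpu cache */
	spin_lock(&example_lock);	/* plain lock: no bh user remains */
	/* max == index requests exactly that index; GFP_ATOMIC because
	 * sleeping under a held spinlock is not allowed. */
	err = idr_alloc_u32(&example_idr, entry, &index, index, GFP_ATOMIC);
	spin_unlock(&example_lock);
	idr_preload_end();

	return err;
}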