Commit 0607e439 authored by Vlad Buslov's avatar Vlad Buslov Committed by David S. Miller

net: sched: implement tcf_block_refcnt_{get|put}()

Implement get/put function for blocks that only take/release the reference
and perform deallocation. These functions are intended to be used by
unlocked rules update path to always hold reference to block while working
with it. They use the new fine-grained locking mechanisms introduced in
previous patches in this set, instead of relying on global protection
provided by rtnl lock.

Extract code that is common with tcf_block_detach_ext() into common
function __tcf_block_put().

Extend tcf_block with rcu to allow safe deallocation when it is accessed
concurrently.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ab281629
...@@ -357,6 +357,7 @@ struct tcf_block { ...@@ -357,6 +357,7 @@ struct tcf_block {
struct tcf_chain *chain; struct tcf_chain *chain;
struct list_head filter_chain_list; struct list_head filter_chain_list;
} chain0; } chain0;
struct rcu_head rcu;
}; };
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
......
...@@ -241,7 +241,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain) ...@@ -241,7 +241,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain)
block->chain0.chain = NULL; block->chain0.chain = NULL;
kfree(chain); kfree(chain);
if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt)) if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
kfree(block); kfree_rcu(block, rcu);
} }
static void tcf_chain_hold(struct tcf_chain *chain) static void tcf_chain_hold(struct tcf_chain *chain)
...@@ -537,6 +537,19 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) ...@@ -537,6 +537,19 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
return idr_find(&tn->idr, block_index); return idr_find(&tn->idr, block_index);
} }
/* Look up a block by index and take a reference to it.
 *
 * Returns the block with its refcount incremented, or NULL if no block
 * with this index exists or the block is already being destroyed (its
 * reference count has dropped to zero). The RCU read-side critical
 * section keeps the looked-up block from being freed between the idr
 * lookup and the refcount attempt.
 */
static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *blk;

	rcu_read_lock();
	blk = tcf_block_lookup(net, block_index);
	if (blk) {
		/* A refcount of zero means teardown already started;
		 * report the block as not found.
		 */
		if (!refcount_inc_not_zero(&blk->refcnt))
			blk = NULL;
	}
	rcu_read_unlock();

	return blk;
}
static void tcf_block_flush_all_chains(struct tcf_block *block) static void tcf_block_flush_all_chains(struct tcf_block *block)
{ {
struct tcf_chain *chain; struct tcf_chain *chain;
...@@ -562,6 +575,40 @@ static void tcf_block_put_all_chains(struct tcf_block *block) ...@@ -562,6 +575,40 @@ static void tcf_block_put_all_chains(struct tcf_block *block)
} }
} }
/* Release one reference to @block and, when it was the last one, tear the
 * block down: unlink it from the shared-block idr, flush and put all chains,
 * unbind hardware offloads, and free the block.
 *
 * @q and @ei are NULL when called via tcf_block_refcnt_put() (pure reference
 * drop, no qdisc binding to undo); they are non-NULL when called from
 * tcf_block_put_ext(), in which case the offload binding is released whether
 * or not this was the last reference.
 */
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei)
{
	if (refcount_dec_and_test(&block->refcnt)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		/* Remove from the idr first so concurrent lookups can no
		 * longer find the dying block.
		 */
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);
		if (!free_block)
			tcf_block_flush_all_chains(block);
		/* Unbind offloads before the block can be freed below. */
		if (q)
			tcf_block_offload_unbind(block, q, ei);

		/* kfree_rcu: tcf_block_refcnt_get() may still be dereferencing
		 * this block inside an RCU read-side critical section.
		 */
		if (free_block)
			kfree_rcu(block, rcu);
		else
			tcf_block_put_all_chains(block);
	} else if (q) {
		/* Not the last reference, but this caller's qdisc binding
		 * must still be undone.
		 */
		tcf_block_offload_unbind(block, q, ei);
	}
}
/* Drop a reference taken with tcf_block_refcnt_get(). There is no qdisc
 * binding to undo on this path, hence the NULL q/ei arguments.
 */
static void tcf_block_refcnt_put(struct tcf_block *block)
{
	__tcf_block_put(block, NULL, NULL);
}
/* Find tcf block. /* Find tcf block.
* Set q, parent, cl when appropriate. * Set q, parent, cl when appropriate.
*/ */
...@@ -786,7 +833,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, ...@@ -786,7 +833,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
if (tcf_block_shared(block)) if (tcf_block_shared(block))
tcf_block_remove(block, net); tcf_block_remove(block, net);
err_block_insert: err_block_insert:
kfree(block); kfree_rcu(block, rcu);
} else { } else {
refcount_dec(&block->refcnt); refcount_dec(&block->refcnt);
} }
...@@ -826,28 +873,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, ...@@ -826,28 +873,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
tcf_chain0_head_change_cb_del(block, ei); tcf_chain0_head_change_cb_del(block, ei);
tcf_block_owner_del(block, q, ei->binder_type); tcf_block_owner_del(block, q, ei->binder_type);
if (refcount_dec_and_test(&block->refcnt)) { __tcf_block_put(block, q, ei);
/* Flushing/putting all chains will cause the block to be
* deallocated when last chain is freed. However, if chain_list
* is empty, block has to be manually deallocated. After block
* reference counter reached 0, it is no longer possible to
* increment it or add new chains to block.
*/
bool free_block = list_empty(&block->chain_list);
if (tcf_block_shared(block))
tcf_block_remove(block, block->net);
if (!free_block)
tcf_block_flush_all_chains(block);
tcf_block_offload_unbind(block, q, ei);
if (free_block)
kfree(block);
else
tcf_block_put_all_chains(block);
} else {
tcf_block_offload_unbind(block, q, ei);
}
} }
EXPORT_SYMBOL(tcf_block_put_ext); EXPORT_SYMBOL(tcf_block_put_ext);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment