Commit 5356f3d7 authored by Ying Xue's avatar Ying Xue Committed by David S. Miller

tipc: always use tipc_node_lock() to hold node lock

Although we obtain the node lock with tipc_node_lock() most of the
time, there are still places where we directly use the native spin
lock interface to grab the node lock. But as we will do more jobs in
the future when the node lock is released, we should ensure that
tipc_node_lock() is always called when the node lock is taken.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5b579e21
...@@ -297,14 +297,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) ...@@ -297,14 +297,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
spin_lock_bh(&n_ptr->lock); tipc_node_lock(n_ptr);
l_ptr = n_ptr->links[bearer_id]; l_ptr = n_ptr->links[bearer_id];
if (l_ptr) { if (l_ptr) {
tipc_link_reset(l_ptr); tipc_link_reset(l_ptr);
if (shutting_down || !tipc_node_is_up(n_ptr)) { if (shutting_down || !tipc_node_is_up(n_ptr)) {
tipc_node_detach_link(l_ptr->owner, l_ptr); tipc_node_detach_link(l_ptr->owner, l_ptr);
tipc_link_reset_fragments(l_ptr); tipc_link_reset_fragments(l_ptr);
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
/* Nobody else can access this link now: */ /* Nobody else can access this link now: */
del_timer_sync(&l_ptr->timer); del_timer_sync(&l_ptr->timer);
...@@ -312,12 +312,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) ...@@ -312,12 +312,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
} else { } else {
/* Detach/delete when failover is finished: */ /* Detach/delete when failover is finished: */
l_ptr->flags |= LINK_STOPPED; l_ptr->flags |= LINK_STOPPED;
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
del_timer_sync(&l_ptr->timer); del_timer_sync(&l_ptr->timer);
} }
continue; continue;
} }
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
} }
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -474,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id) ...@@ -474,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
spin_lock_bh(&n_ptr->lock); tipc_node_lock(n_ptr);
l_ptr = n_ptr->links[bearer_id]; l_ptr = n_ptr->links[bearer_id];
if (l_ptr) if (l_ptr)
tipc_link_reset(l_ptr); tipc_link_reset(l_ptr);
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
} }
rcu_read_unlock(); rcu_read_unlock();
} }
......
...@@ -135,18 +135,18 @@ void named_cluster_distribute(struct sk_buff *buf) ...@@ -135,18 +135,18 @@ void named_cluster_distribute(struct sk_buff *buf)
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
spin_lock_bh(&n_ptr->lock); tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[n_ptr->addr & 1]; l_ptr = n_ptr->active_links[n_ptr->addr & 1];
if (l_ptr) { if (l_ptr) {
buf_copy = skb_copy(buf, GFP_ATOMIC); buf_copy = skb_copy(buf, GFP_ATOMIC);
if (!buf_copy) { if (!buf_copy) {
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
break; break;
} }
msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
__tipc_link_xmit(l_ptr, buf_copy); __tipc_link_xmit(l_ptr, buf_copy);
} }
spin_unlock_bh(&n_ptr->lock); tipc_node_unlock(n_ptr);
} }
rcu_read_unlock(); rcu_read_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment