Commit de133464 authored by WANG Cong, committed by David S. Miller

netns: make nsid_lock per net

The spinlock is used to protect netns_ids which is per net,
so there is no need to use a global spinlock.

Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4ab7f913
...@@ -58,6 +58,7 @@ struct net { ...@@ -58,6 +58,7 @@ struct net {
struct list_head exit_list; /* Use only net_mutex */ struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */ struct user_namespace *user_ns; /* Owning user namespace */
spinlock_t nsid_lock;
struct idr netns_ids; struct idr netns_ids;
struct ns_common ns; struct ns_common ns;
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
static LIST_HEAD(pernet_list); static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list; static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex); DEFINE_MUTEX(net_mutex);
static DEFINE_SPINLOCK(nsid_lock);
LIST_HEAD(net_namespace_list); LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list);
...@@ -218,10 +217,10 @@ int peernet2id_alloc(struct net *net, struct net *peer) ...@@ -218,10 +217,10 @@ int peernet2id_alloc(struct net *net, struct net *peer)
bool alloc; bool alloc;
int id; int id;
spin_lock_irqsave(&nsid_lock, flags); spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true; alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc); id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0) if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id); rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id; return id;
...@@ -234,9 +233,9 @@ int peernet2id(struct net *net, struct net *peer) ...@@ -234,9 +233,9 @@ int peernet2id(struct net *net, struct net *peer)
unsigned long flags; unsigned long flags;
int id; int id;
spin_lock_irqsave(&nsid_lock, flags); spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer); id = __peernet2id(net, peer);
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
return id; return id;
} }
...@@ -257,11 +256,11 @@ struct net *get_net_ns_by_id(struct net *net, int id) ...@@ -257,11 +256,11 @@ struct net *get_net_ns_by_id(struct net *net, int id)
return NULL; return NULL;
rcu_read_lock(); rcu_read_lock();
spin_lock_irqsave(&nsid_lock, flags); spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id); peer = idr_find(&net->netns_ids, id);
if (peer) if (peer)
get_net(peer); get_net(peer);
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock(); rcu_read_unlock();
return peer; return peer;
...@@ -282,6 +281,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) ...@@ -282,6 +281,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
net->dev_base_seq = 1; net->dev_base_seq = 1;
net->user_ns = user_ns; net->user_ns = user_ns;
idr_init(&net->netns_ids); idr_init(&net->netns_ids);
spin_lock_init(&net->nsid_lock);
list_for_each_entry(ops, &pernet_list, list) { list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net); error = ops_init(ops, net);
...@@ -404,17 +404,17 @@ static void cleanup_net(struct work_struct *work) ...@@ -404,17 +404,17 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) { for_each_net(tmp) {
int id; int id;
spin_lock_irq(&nsid_lock); spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net); id = __peernet2id(tmp, net);
if (id >= 0) if (id >= 0)
idr_remove(&tmp->netns_ids, id); idr_remove(&tmp->netns_ids, id);
spin_unlock_irq(&nsid_lock); spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0) if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id); rtnl_net_notifyid(tmp, RTM_DELNSID, id);
} }
spin_lock_irq(&nsid_lock); spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids); idr_destroy(&net->netns_ids);
spin_unlock_irq(&nsid_lock); spin_unlock_irq(&net->nsid_lock);
} }
rtnl_unlock(); rtnl_unlock();
...@@ -563,15 +563,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) ...@@ -563,15 +563,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer)) if (IS_ERR(peer))
return PTR_ERR(peer); return PTR_ERR(peer);
spin_lock_irqsave(&nsid_lock, flags); spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) { if (__peernet2id(net, peer) >= 0) {
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST; err = -EEXIST;
goto out; goto out;
} }
err = alloc_netid(net, peer, nsid); err = alloc_netid(net, peer, nsid);
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) { if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err); rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0; err = 0;
...@@ -695,9 +695,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -695,9 +695,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
}; };
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&nsid_lock, flags); spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
spin_unlock_irqrestore(&nsid_lock, flags); spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx; cb->args[0] = net_cb.idx;
return skb->len; return skb->len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment