Commit 4a3e2f71 authored by Arjan van de Ven, committed by David S. Miller

[NET] sem2mutex: net/

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d4ccd08c
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/pfkeyv2.h> #include <linux/pfkeyv2.h>
#include <linux/in6.h> #include <linux/in6.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/dst.h> #include <net/dst.h>
...@@ -24,7 +25,7 @@ extern struct sock *xfrm_nl; ...@@ -24,7 +25,7 @@ extern struct sock *xfrm_nl;
extern u32 sysctl_xfrm_aevent_etime; extern u32 sysctl_xfrm_aevent_etime;
extern u32 sysctl_xfrm_aevent_rseqth; extern u32 sysctl_xfrm_aevent_rseqth;
extern struct semaphore xfrm_cfg_sem; extern struct mutex xfrm_cfg_mutex;
/* Organization of SPD aka "XFRM rules" /* Organization of SPD aka "XFRM rules"
------------------------------------ ------------------------------------
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/atmmpc.h> #include <linux/atmmpc.h>
#include <net/atmclip.h> #include <net/atmclip.h>
#include <linux/atmlec.h> #include <linux/atmlec.h>
#include <linux/mutex.h>
#include <asm/ioctls.h> #include <asm/ioctls.h>
#include "resources.h" #include "resources.h"
...@@ -25,22 +26,22 @@ ...@@ -25,22 +26,22 @@
#include "common.h" #include "common.h"
static DECLARE_MUTEX(ioctl_mutex); static DEFINE_MUTEX(ioctl_mutex);
static LIST_HEAD(ioctl_list); static LIST_HEAD(ioctl_list);
void register_atm_ioctl(struct atm_ioctl *ioctl) void register_atm_ioctl(struct atm_ioctl *ioctl)
{ {
down(&ioctl_mutex); mutex_lock(&ioctl_mutex);
list_add_tail(&ioctl->list, &ioctl_list); list_add_tail(&ioctl->list, &ioctl_list);
up(&ioctl_mutex); mutex_unlock(&ioctl_mutex);
} }
void deregister_atm_ioctl(struct atm_ioctl *ioctl) void deregister_atm_ioctl(struct atm_ioctl *ioctl)
{ {
down(&ioctl_mutex); mutex_lock(&ioctl_mutex);
list_del(&ioctl->list); list_del(&ioctl->list);
up(&ioctl_mutex); mutex_unlock(&ioctl_mutex);
} }
EXPORT_SYMBOL(register_atm_ioctl); EXPORT_SYMBOL(register_atm_ioctl);
...@@ -137,7 +138,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ...@@ -137,7 +138,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
error = -ENOIOCTLCMD; error = -ENOIOCTLCMD;
down(&ioctl_mutex); mutex_lock(&ioctl_mutex);
list_for_each(pos, &ioctl_list) { list_for_each(pos, &ioctl_list) {
struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list); struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list);
if (try_module_get(ic->owner)) { if (try_module_get(ic->owner)) {
...@@ -147,7 +148,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ...@@ -147,7 +148,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break; break;
} }
} }
up(&ioctl_mutex); mutex_unlock(&ioctl_mutex);
if (error != -ENOIOCTLCMD) if (error != -ENOIOCTLCMD)
goto done; goto done;
......
...@@ -37,6 +37,8 @@ ...@@ -37,6 +37,8 @@
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/net.h> #include <linux/net.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
...@@ -57,9 +59,9 @@ static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; ...@@ -57,9 +59,9 @@ static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
static struct task_struct *rfcomm_thread; static struct task_struct *rfcomm_thread;
static DECLARE_MUTEX(rfcomm_sem); static DEFINE_MUTEX(rfcomm_mutex);
#define rfcomm_lock() down(&rfcomm_sem); #define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() up(&rfcomm_sem); #define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
static unsigned long rfcomm_event; static unsigned long rfcomm_event;
......
...@@ -81,6 +81,7 @@ ...@@ -81,6 +81,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/socket.h> #include <linux/socket.h>
...@@ -2931,7 +2932,7 @@ static void netdev_wait_allrefs(struct net_device *dev) ...@@ -2931,7 +2932,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
* 2) Since we run with the RTNL semaphore not held, we can sleep * 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero. * safely in order to wait for the netdev refcnt to drop to zero.
*/ */
static DECLARE_MUTEX(net_todo_run_mutex); static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void) void netdev_run_todo(void)
{ {
struct list_head list = LIST_HEAD_INIT(list); struct list_head list = LIST_HEAD_INIT(list);
...@@ -2939,7 +2940,7 @@ void netdev_run_todo(void) ...@@ -2939,7 +2940,7 @@ void netdev_run_todo(void)
/* Need to guard against multiple cpu's getting out of order. */ /* Need to guard against multiple cpu's getting out of order. */
down(&net_todo_run_mutex); mutex_lock(&net_todo_run_mutex);
/* Not safe to do outside the semaphore. We must not return /* Not safe to do outside the semaphore. We must not return
* until all unregister events invoked by the local processor * until all unregister events invoked by the local processor
...@@ -2996,7 +2997,7 @@ void netdev_run_todo(void) ...@@ -2996,7 +2997,7 @@ void netdev_run_todo(void)
} }
out: out:
up(&net_todo_run_mutex); mutex_unlock(&net_todo_run_mutex);
} }
/** /**
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h> #include <net/flow.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/semaphore.h> #include <asm/semaphore.h>
...@@ -287,11 +288,11 @@ static void flow_cache_flush_per_cpu(void *data) ...@@ -287,11 +288,11 @@ static void flow_cache_flush_per_cpu(void *data)
void flow_cache_flush(void) void flow_cache_flush(void)
{ {
struct flow_flush_info info; struct flow_flush_info info;
static DECLARE_MUTEX(flow_flush_sem); static DEFINE_MUTEX(flow_flush_sem);
/* Don't want cpus going down or up during this. */ /* Don't want cpus going down or up during this. */
lock_cpu_hotplug(); lock_cpu_hotplug();
down(&flow_flush_sem); mutex_lock(&flow_flush_sem);
atomic_set(&info.cpuleft, num_online_cpus()); atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion); init_completion(&info.completion);
...@@ -301,7 +302,7 @@ void flow_cache_flush(void) ...@@ -301,7 +302,7 @@ void flow_cache_flush(void)
local_bh_enable(); local_bh_enable();
wait_for_completion(&info.completion); wait_for_completion(&info.completion);
up(&flow_flush_sem); mutex_unlock(&flow_flush_sem);
unlock_cpu_hotplug(); unlock_cpu_hotplug();
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <net/ip.h> #include <net/ip.h>
#include <net/xfrm.h> #include <net/xfrm.h>
#include <net/icmp.h> #include <net/icmp.h>
...@@ -36,7 +37,7 @@ struct ipcomp_tfms { ...@@ -36,7 +37,7 @@ struct ipcomp_tfms {
int users; int users;
}; };
static DECLARE_MUTEX(ipcomp_resource_sem); static DEFINE_MUTEX(ipcomp_resource_mutex);
static void **ipcomp_scratches; static void **ipcomp_scratches;
static int ipcomp_scratch_users; static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list); static LIST_HEAD(ipcomp_tfms_list);
...@@ -253,7 +254,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) ...@@ -253,7 +254,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
} }
/* /*
* Must be protected by xfrm_cfg_sem. State and tunnel user references are * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
* always incremented on success. * always incremented on success.
*/ */
static int ipcomp_tunnel_attach(struct xfrm_state *x) static int ipcomp_tunnel_attach(struct xfrm_state *x)
...@@ -411,9 +412,9 @@ static void ipcomp_destroy(struct xfrm_state *x) ...@@ -411,9 +412,9 @@ static void ipcomp_destroy(struct xfrm_state *x)
if (!ipcd) if (!ipcd)
return; return;
xfrm_state_delete_tunnel(x); xfrm_state_delete_tunnel(x);
down(&ipcomp_resource_sem); mutex_lock(&ipcomp_resource_mutex);
ipcomp_free_data(ipcd); ipcomp_free_data(ipcd);
up(&ipcomp_resource_sem); mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd); kfree(ipcd);
} }
...@@ -440,14 +441,14 @@ static int ipcomp_init_state(struct xfrm_state *x) ...@@ -440,14 +441,14 @@ static int ipcomp_init_state(struct xfrm_state *x)
if (x->props.mode) if (x->props.mode)
x->props.header_len += sizeof(struct iphdr); x->props.header_len += sizeof(struct iphdr);
down(&ipcomp_resource_sem); mutex_lock(&ipcomp_resource_mutex);
if (!ipcomp_alloc_scratches()) if (!ipcomp_alloc_scratches())
goto error; goto error;
ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name); ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms) if (!ipcd->tfms)
goto error; goto error;
up(&ipcomp_resource_sem); mutex_unlock(&ipcomp_resource_mutex);
if (x->props.mode) { if (x->props.mode) {
err = ipcomp_tunnel_attach(x); err = ipcomp_tunnel_attach(x);
...@@ -464,10 +465,10 @@ static int ipcomp_init_state(struct xfrm_state *x) ...@@ -464,10 +465,10 @@ static int ipcomp_init_state(struct xfrm_state *x)
return err; return err;
error_tunnel: error_tunnel:
down(&ipcomp_resource_sem); mutex_lock(&ipcomp_resource_mutex);
error: error:
ipcomp_free_data(ipcd); ipcomp_free_data(ipcd);
up(&ipcomp_resource_sem); mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd); kfree(ipcd);
goto out; goto out;
} }
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/route.h> #include <net/route.h>
...@@ -61,7 +62,7 @@ static unsigned int queue_dropped = 0; ...@@ -61,7 +62,7 @@ static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0; static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl; static struct sock *ipqnl;
static LIST_HEAD(queue_list); static LIST_HEAD(queue_list);
static DECLARE_MUTEX(ipqnl_sem); static DEFINE_MUTEX(ipqnl_mutex);
static void static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
...@@ -539,7 +540,7 @@ ipq_rcv_sk(struct sock *sk, int len) ...@@ -539,7 +540,7 @@ ipq_rcv_sk(struct sock *sk, int len)
struct sk_buff *skb; struct sk_buff *skb;
unsigned int qlen; unsigned int qlen;
down(&ipqnl_sem); mutex_lock(&ipqnl_mutex);
for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
skb = skb_dequeue(&sk->sk_receive_queue); skb = skb_dequeue(&sk->sk_receive_queue);
...@@ -547,7 +548,7 @@ ipq_rcv_sk(struct sock *sk, int len) ...@@ -547,7 +548,7 @@ ipq_rcv_sk(struct sock *sk, int len)
kfree_skb(skb); kfree_skb(skb);
} }
up(&ipqnl_sem); mutex_unlock(&ipqnl_mutex);
} }
static int static int
...@@ -708,8 +709,8 @@ init_or_cleanup(int init) ...@@ -708,8 +709,8 @@ init_or_cleanup(int init)
cleanup_ipqnl: cleanup_ipqnl:
sock_release(ipqnl->sk_socket); sock_release(ipqnl->sk_socket);
down(&ipqnl_sem); mutex_lock(&ipqnl_mutex);
up(&ipqnl_sem); mutex_unlock(&ipqnl_mutex);
cleanup_netlink_notifier: cleanup_netlink_notifier:
netlink_unregister_notifier(&ipq_nl_notifier); netlink_unregister_notifier(&ipq_nl_notifier);
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mutex.h>
#include <net/xfrm.h> #include <net/xfrm.h>
#include <net/ip.h> #include <net/ip.h>
#include <net/protocol.h> #include <net/protocol.h>
...@@ -26,19 +27,19 @@ static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, s ...@@ -26,19 +27,19 @@ static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, s
} }
static struct xfrm_tunnel *ipip_handler; static struct xfrm_tunnel *ipip_handler;
static DECLARE_MUTEX(xfrm4_tunnel_sem); static DEFINE_MUTEX(xfrm4_tunnel_mutex);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler) int xfrm4_tunnel_register(struct xfrm_tunnel *handler)
{ {
int ret; int ret;
down(&xfrm4_tunnel_sem); mutex_lock(&xfrm4_tunnel_mutex);
ret = 0; ret = 0;
if (ipip_handler != NULL) if (ipip_handler != NULL)
ret = -EINVAL; ret = -EINVAL;
if (!ret) if (!ret)
ipip_handler = handler; ipip_handler = handler;
up(&xfrm4_tunnel_sem); mutex_unlock(&xfrm4_tunnel_mutex);
return ret; return ret;
} }
...@@ -49,13 +50,13 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler) ...@@ -49,13 +50,13 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler)
{ {
int ret; int ret;
down(&xfrm4_tunnel_sem); mutex_lock(&xfrm4_tunnel_mutex);
ret = 0; ret = 0;
if (ipip_handler != handler) if (ipip_handler != handler)
ret = -EINVAL; ret = -EINVAL;
if (!ret) if (!ret)
ipip_handler = NULL; ipip_handler = NULL;
up(&xfrm4_tunnel_sem); mutex_unlock(&xfrm4_tunnel_mutex);
synchronize_net(); synchronize_net();
......
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#include <net/protocol.h> #include <net/protocol.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include <linux/icmpv6.h> #include <linux/icmpv6.h>
#include <linux/mutex.h>
struct ipcomp6_tfms { struct ipcomp6_tfms {
struct list_head list; struct list_head list;
...@@ -57,7 +58,7 @@ struct ipcomp6_tfms { ...@@ -57,7 +58,7 @@ struct ipcomp6_tfms {
int users; int users;
}; };
static DECLARE_MUTEX(ipcomp6_resource_sem); static DEFINE_MUTEX(ipcomp6_resource_mutex);
static void **ipcomp6_scratches; static void **ipcomp6_scratches;
static int ipcomp6_scratch_users; static int ipcomp6_scratch_users;
static LIST_HEAD(ipcomp6_tfms_list); static LIST_HEAD(ipcomp6_tfms_list);
...@@ -405,9 +406,9 @@ static void ipcomp6_destroy(struct xfrm_state *x) ...@@ -405,9 +406,9 @@ static void ipcomp6_destroy(struct xfrm_state *x)
if (!ipcd) if (!ipcd)
return; return;
xfrm_state_delete_tunnel(x); xfrm_state_delete_tunnel(x);
down(&ipcomp6_resource_sem); mutex_lock(&ipcomp6_resource_mutex);
ipcomp6_free_data(ipcd); ipcomp6_free_data(ipcd);
up(&ipcomp6_resource_sem); mutex_unlock(&ipcomp6_resource_mutex);
kfree(ipcd); kfree(ipcd);
xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
...@@ -436,14 +437,14 @@ static int ipcomp6_init_state(struct xfrm_state *x) ...@@ -436,14 +437,14 @@ static int ipcomp6_init_state(struct xfrm_state *x)
if (x->props.mode) if (x->props.mode)
x->props.header_len += sizeof(struct ipv6hdr); x->props.header_len += sizeof(struct ipv6hdr);
down(&ipcomp6_resource_sem); mutex_lock(&ipcomp6_resource_mutex);
if (!ipcomp6_alloc_scratches()) if (!ipcomp6_alloc_scratches())
goto error; goto error;
ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name); ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms) if (!ipcd->tfms)
goto error; goto error;
up(&ipcomp6_resource_sem); mutex_unlock(&ipcomp6_resource_mutex);
if (x->props.mode) { if (x->props.mode) {
err = ipcomp6_tunnel_attach(x); err = ipcomp6_tunnel_attach(x);
...@@ -459,10 +460,10 @@ static int ipcomp6_init_state(struct xfrm_state *x) ...@@ -459,10 +460,10 @@ static int ipcomp6_init_state(struct xfrm_state *x)
out: out:
return err; return err;
error_tunnel: error_tunnel:
down(&ipcomp6_resource_sem); mutex_lock(&ipcomp6_resource_mutex);
error: error:
ipcomp6_free_data(ipcd); ipcomp6_free_data(ipcd);
up(&ipcomp6_resource_sem); mutex_unlock(&ipcomp6_resource_mutex);
kfree(ipcd); kfree(ipcd);
goto out; goto out;
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/ipv6.h> #include <net/ipv6.h>
#include <net/ip6_route.h> #include <net/ip6_route.h>
...@@ -65,7 +66,7 @@ static unsigned int queue_dropped = 0; ...@@ -65,7 +66,7 @@ static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0; static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl; static struct sock *ipqnl;
static LIST_HEAD(queue_list); static LIST_HEAD(queue_list);
static DECLARE_MUTEX(ipqnl_sem); static DEFINE_MUTEX(ipqnl_mutex);
static void static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
...@@ -537,7 +538,7 @@ ipq_rcv_sk(struct sock *sk, int len) ...@@ -537,7 +538,7 @@ ipq_rcv_sk(struct sock *sk, int len)
struct sk_buff *skb; struct sk_buff *skb;
unsigned int qlen; unsigned int qlen;
down(&ipqnl_sem); mutex_lock(&ipqnl_mutex);
for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
skb = skb_dequeue(&sk->sk_receive_queue); skb = skb_dequeue(&sk->sk_receive_queue);
...@@ -545,7 +546,7 @@ ipq_rcv_sk(struct sock *sk, int len) ...@@ -545,7 +546,7 @@ ipq_rcv_sk(struct sock *sk, int len)
kfree_skb(skb); kfree_skb(skb);
} }
up(&ipqnl_sem); mutex_unlock(&ipqnl_mutex);
} }
static int static int
...@@ -704,8 +705,8 @@ init_or_cleanup(int init) ...@@ -704,8 +705,8 @@ init_or_cleanup(int init)
cleanup_ipqnl: cleanup_ipqnl:
sock_release(ipqnl->sk_socket); sock_release(ipqnl->sk_socket);
down(&ipqnl_sem); mutex_lock(&ipqnl_mutex);
up(&ipqnl_sem); mutex_unlock(&ipqnl_mutex);
cleanup_netlink_notifier: cleanup_netlink_notifier:
netlink_unregister_notifier(&ipq_nl_notifier); netlink_unregister_notifier(&ipq_nl_notifier);
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <net/protocol.h> #include <net/protocol.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include <linux/icmpv6.h> #include <linux/icmpv6.h>
#include <linux/mutex.h>
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define X6TDEBUG 3 # define X6TDEBUG 3
...@@ -357,19 +358,19 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *dec ...@@ -357,19 +358,19 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *dec
} }
static struct xfrm6_tunnel *xfrm6_tunnel_handler; static struct xfrm6_tunnel *xfrm6_tunnel_handler;
static DECLARE_MUTEX(xfrm6_tunnel_sem); static DEFINE_MUTEX(xfrm6_tunnel_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler) int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
{ {
int ret; int ret;
down(&xfrm6_tunnel_sem); mutex_lock(&xfrm6_tunnel_mutex);
ret = 0; ret = 0;
if (xfrm6_tunnel_handler != NULL) if (xfrm6_tunnel_handler != NULL)
ret = -EINVAL; ret = -EINVAL;
if (!ret) if (!ret)
xfrm6_tunnel_handler = handler; xfrm6_tunnel_handler = handler;
up(&xfrm6_tunnel_sem); mutex_unlock(&xfrm6_tunnel_mutex);
return ret; return ret;
} }
...@@ -380,13 +381,13 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler) ...@@ -380,13 +381,13 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
{ {
int ret; int ret;
down(&xfrm6_tunnel_sem); mutex_lock(&xfrm6_tunnel_mutex);
ret = 0; ret = 0;
if (xfrm6_tunnel_handler != handler) if (xfrm6_tunnel_handler != handler)
ret = -EINVAL; ret = -EINVAL;
if (!ret) if (!ret)
xfrm6_tunnel_handler = NULL; xfrm6_tunnel_handler = NULL;
up(&xfrm6_tunnel_sem); mutex_unlock(&xfrm6_tunnel_mutex);
synchronize_net(); synchronize_net();
......
...@@ -3080,9 +3080,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb, ...@@ -3080,9 +3080,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
if (!hdr) if (!hdr)
goto out; goto out;
down(&xfrm_cfg_sem); mutex_lock(&xfrm_cfg_mutex);
err = pfkey_process(sk, skb, hdr); err = pfkey_process(sk, skb, hdr);
up(&xfrm_cfg_sem); mutex_unlock(&xfrm_cfg_mutex);
out: out:
if (err && hdr && pfkey_error(hdr, err, sk) == 0) if (err && hdr && pfkey_error(hdr, err, sk) == 0)
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include "nf_internals.h" #include "nf_internals.h"
...@@ -11,7 +12,7 @@ ...@@ -11,7 +12,7 @@
/* Sockopts only registered and called from user context, so /* Sockopts only registered and called from user context, so
net locking would be overkill. Also, [gs]etsockopt calls may net locking would be overkill. Also, [gs]etsockopt calls may
sleep. */ sleep. */
static DECLARE_MUTEX(nf_sockopt_mutex); static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts); static LIST_HEAD(nf_sockopts);
/* Do exclusive ranges overlap? */ /* Do exclusive ranges overlap? */
...@@ -26,7 +27,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg) ...@@ -26,7 +27,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
struct list_head *i; struct list_head *i;
int ret = 0; int ret = 0;
if (down_interruptible(&nf_sockopt_mutex) != 0) if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR; return -EINTR;
list_for_each(i, &nf_sockopts) { list_for_each(i, &nf_sockopts) {
...@@ -48,7 +49,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg) ...@@ -48,7 +49,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
list_add(&reg->list, &nf_sockopts); list_add(&reg->list, &nf_sockopts);
out: out:
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
return ret; return ret;
} }
EXPORT_SYMBOL(nf_register_sockopt); EXPORT_SYMBOL(nf_register_sockopt);
...@@ -57,18 +58,18 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg) ...@@ -57,18 +58,18 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{ {
/* No point being interruptible: we're probably in cleanup_module() */ /* No point being interruptible: we're probably in cleanup_module() */
restart: restart:
down(&nf_sockopt_mutex); mutex_lock(&nf_sockopt_mutex);
if (reg->use != 0) { if (reg->use != 0) {
/* To be woken by nf_sockopt call... */ /* To be woken by nf_sockopt call... */
/* FIXME: Stuart Young's name appears gratuitously. */ /* FIXME: Stuart Young's name appears gratuitously. */
set_current_state(TASK_UNINTERRUPTIBLE); set_current_state(TASK_UNINTERRUPTIBLE);
reg->cleanup_task = current; reg->cleanup_task = current;
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
schedule(); schedule();
goto restart; goto restart;
} }
list_del(&reg->list); list_del(&reg->list);
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
} }
EXPORT_SYMBOL(nf_unregister_sockopt); EXPORT_SYMBOL(nf_unregister_sockopt);
...@@ -80,7 +81,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val, ...@@ -80,7 +81,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
struct nf_sockopt_ops *ops; struct nf_sockopt_ops *ops;
int ret; int ret;
if (down_interruptible(&nf_sockopt_mutex) != 0) if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR; return -EINTR;
list_for_each(i, &nf_sockopts) { list_for_each(i, &nf_sockopts) {
...@@ -90,7 +91,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val, ...@@ -90,7 +91,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
if (val >= ops->get_optmin if (val >= ops->get_optmin
&& val < ops->get_optmax) { && val < ops->get_optmax) {
ops->use++; ops->use++;
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len); ret = ops->get(sk, val, opt, len);
goto out; goto out;
} }
...@@ -98,22 +99,22 @@ static int nf_sockopt(struct sock *sk, int pf, int val, ...@@ -98,22 +99,22 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
if (val >= ops->set_optmin if (val >= ops->set_optmin
&& val < ops->set_optmax) { && val < ops->set_optmax) {
ops->use++; ops->use++;
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len); ret = ops->set(sk, val, opt, *len);
goto out; goto out;
} }
} }
} }
} }
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT; return -ENOPROTOOPT;
out: out:
down(&nf_sockopt_mutex); mutex_lock(&nf_sockopt_mutex);
ops->use--; ops->use--;
if (ops->cleanup_task) if (ops->cleanup_task)
wake_up_process(ops->cleanup_task); wake_up_process(ops->cleanup_task);
up(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
return ret; return ret;
} }
......
...@@ -68,6 +68,7 @@ ...@@ -68,6 +68,7 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/wanrouter.h> #include <linux/wanrouter.h>
#include <linux/if_bridge.h> #include <linux/if_bridge.h>
#include <linux/if_frad.h> #include <linux/if_frad.h>
...@@ -826,36 +827,36 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf, ...@@ -826,36 +827,36 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
* with module unload. * with module unload.
*/ */
static DECLARE_MUTEX(br_ioctl_mutex); static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL; static int (*br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
void brioctl_set(int (*hook)(unsigned int, void __user *)) void brioctl_set(int (*hook)(unsigned int, void __user *))
{ {
down(&br_ioctl_mutex); mutex_lock(&br_ioctl_mutex);
br_ioctl_hook = hook; br_ioctl_hook = hook;
up(&br_ioctl_mutex); mutex_unlock(&br_ioctl_mutex);
} }
EXPORT_SYMBOL(brioctl_set); EXPORT_SYMBOL(brioctl_set);
static DECLARE_MUTEX(vlan_ioctl_mutex); static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook)(void __user *arg); static int (*vlan_ioctl_hook)(void __user *arg);
void vlan_ioctl_set(int (*hook)(void __user *)) void vlan_ioctl_set(int (*hook)(void __user *))
{ {
down(&vlan_ioctl_mutex); mutex_lock(&vlan_ioctl_mutex);
vlan_ioctl_hook = hook; vlan_ioctl_hook = hook;
up(&vlan_ioctl_mutex); mutex_unlock(&vlan_ioctl_mutex);
} }
EXPORT_SYMBOL(vlan_ioctl_set); EXPORT_SYMBOL(vlan_ioctl_set);
static DECLARE_MUTEX(dlci_ioctl_mutex); static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook)(unsigned int, void __user *); static int (*dlci_ioctl_hook)(unsigned int, void __user *);
void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)) void dlci_ioctl_set(int (*hook)(unsigned int, void __user *))
{ {
down(&dlci_ioctl_mutex); mutex_lock(&dlci_ioctl_mutex);
dlci_ioctl_hook = hook; dlci_ioctl_hook = hook;
up(&dlci_ioctl_mutex); mutex_unlock(&dlci_ioctl_mutex);
} }
EXPORT_SYMBOL(dlci_ioctl_set); EXPORT_SYMBOL(dlci_ioctl_set);
...@@ -899,10 +900,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) ...@@ -899,10 +900,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!br_ioctl_hook) if (!br_ioctl_hook)
request_module("bridge"); request_module("bridge");
down(&br_ioctl_mutex); mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook) if (br_ioctl_hook)
err = br_ioctl_hook(cmd, argp); err = br_ioctl_hook(cmd, argp);
up(&br_ioctl_mutex); mutex_unlock(&br_ioctl_mutex);
break; break;
case SIOCGIFVLAN: case SIOCGIFVLAN:
case SIOCSIFVLAN: case SIOCSIFVLAN:
...@@ -910,10 +911,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) ...@@ -910,10 +911,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!vlan_ioctl_hook) if (!vlan_ioctl_hook)
request_module("8021q"); request_module("8021q");
down(&vlan_ioctl_mutex); mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook) if (vlan_ioctl_hook)
err = vlan_ioctl_hook(argp); err = vlan_ioctl_hook(argp);
up(&vlan_ioctl_mutex); mutex_unlock(&vlan_ioctl_mutex);
break; break;
case SIOCGIFDIVERT: case SIOCGIFDIVERT:
case SIOCSIFDIVERT: case SIOCSIFDIVERT:
...@@ -927,9 +928,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) ...@@ -927,9 +928,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
request_module("dlci"); request_module("dlci");
if (dlci_ioctl_hook) { if (dlci_ioctl_hook) {
down(&dlci_ioctl_mutex); mutex_lock(&dlci_ioctl_mutex);
err = dlci_ioctl_hook(cmd, argp); err = dlci_ioctl_hook(cmd, argp);
up(&dlci_ioctl_mutex); mutex_unlock(&dlci_ioctl_mutex);
} }
break; break;
default: default:
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/net.h> #include <linux/net.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h> #include <asm/ioctls.h>
#include <linux/sunrpc/types.h> #include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h> #include <linux/sunrpc/cache.h>
...@@ -532,7 +533,7 @@ void cache_clean_deferred(void *owner) ...@@ -532,7 +533,7 @@ void cache_clean_deferred(void *owner)
*/ */
static DEFINE_SPINLOCK(queue_lock); static DEFINE_SPINLOCK(queue_lock);
static DECLARE_MUTEX(queue_io_sem); static DEFINE_MUTEX(queue_io_mutex);
struct cache_queue { struct cache_queue {
struct list_head list; struct list_head list;
...@@ -561,7 +562,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) ...@@ -561,7 +562,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
if (count == 0) if (count == 0)
return 0; return 0;
down(&queue_io_sem); /* protect against multiple concurrent mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
* readers on this file */ * readers on this file */
again: again:
spin_lock(&queue_lock); spin_lock(&queue_lock);
...@@ -574,7 +575,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) ...@@ -574,7 +575,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
} }
if (rp->q.list.next == &cd->queue) { if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock); spin_unlock(&queue_lock);
up(&queue_io_sem); mutex_unlock(&queue_io_mutex);
BUG_ON(rp->offset); BUG_ON(rp->offset);
return 0; return 0;
} }
...@@ -621,11 +622,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) ...@@ -621,11 +622,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
} }
if (err == -EAGAIN) if (err == -EAGAIN)
goto again; goto again;
up(&queue_io_sem); mutex_unlock(&queue_io_mutex);
return err ? err : count; return err ? err : count;
} }
static char write_buf[8192]; /* protected by queue_io_sem */ static char write_buf[8192]; /* protected by queue_io_mutex */
static ssize_t static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count, cache_write(struct file *filp, const char __user *buf, size_t count,
...@@ -639,10 +640,10 @@ cache_write(struct file *filp, const char __user *buf, size_t count, ...@@ -639,10 +640,10 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
if (count >= sizeof(write_buf)) if (count >= sizeof(write_buf))
return -EINVAL; return -EINVAL;
down(&queue_io_sem); mutex_lock(&queue_io_mutex);
if (copy_from_user(write_buf, buf, count)) { if (copy_from_user(write_buf, buf, count)) {
up(&queue_io_sem); mutex_unlock(&queue_io_mutex);
return -EFAULT; return -EFAULT;
} }
write_buf[count] = '\0'; write_buf[count] = '\0';
...@@ -651,7 +652,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count, ...@@ -651,7 +652,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
else else
err = -EINVAL; err = -EINVAL;
up(&queue_io_sem); mutex_unlock(&queue_io_mutex);
return err ? err : count; return err ? err : count;
} }
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sunrpc/clnt.h> #include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h> #include <linux/sunrpc/xprt.h>
...@@ -62,7 +63,7 @@ static LIST_HEAD(all_tasks); ...@@ -62,7 +63,7 @@ static LIST_HEAD(all_tasks);
/* /*
* rpciod-related stuff * rpciod-related stuff
*/ */
static DECLARE_MUTEX(rpciod_sema); static DEFINE_MUTEX(rpciod_mutex);
static unsigned int rpciod_users; static unsigned int rpciod_users;
static struct workqueue_struct *rpciod_workqueue; static struct workqueue_struct *rpciod_workqueue;
...@@ -1047,7 +1048,7 @@ rpciod_up(void) ...@@ -1047,7 +1048,7 @@ rpciod_up(void)
struct workqueue_struct *wq; struct workqueue_struct *wq;
int error = 0; int error = 0;
down(&rpciod_sema); mutex_lock(&rpciod_mutex);
dprintk("rpciod_up: users %d\n", rpciod_users); dprintk("rpciod_up: users %d\n", rpciod_users);
rpciod_users++; rpciod_users++;
if (rpciod_workqueue) if (rpciod_workqueue)
...@@ -1070,14 +1071,14 @@ rpciod_up(void) ...@@ -1070,14 +1071,14 @@ rpciod_up(void)
rpciod_workqueue = wq; rpciod_workqueue = wq;
error = 0; error = 0;
out: out:
up(&rpciod_sema); mutex_unlock(&rpciod_mutex);
return error; return error;
} }
void void
rpciod_down(void) rpciod_down(void)
{ {
down(&rpciod_sema); mutex_lock(&rpciod_mutex);
dprintk("rpciod_down sema %d\n", rpciod_users); dprintk("rpciod_down sema %d\n", rpciod_users);
if (rpciod_users) { if (rpciod_users) {
if (--rpciod_users) if (--rpciod_users)
...@@ -1094,7 +1095,7 @@ rpciod_down(void) ...@@ -1094,7 +1095,7 @@ rpciod_down(void)
destroy_workqueue(rpciod_workqueue); destroy_workqueue(rpciod_workqueue);
rpciod_workqueue = NULL; rpciod_workqueue = NULL;
out: out:
up(&rpciod_sema); mutex_unlock(&rpciod_mutex);
} }
#ifdef RPC_DEBUG #ifdef RPC_DEBUG
......
...@@ -76,6 +76,7 @@ ...@@ -76,6 +76,7 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/af_unix.h> #include <net/af_unix.h>
...@@ -169,7 +170,7 @@ static void maybe_unmark_and_push(struct sock *x) ...@@ -169,7 +170,7 @@ static void maybe_unmark_and_push(struct sock *x)
void unix_gc(void) void unix_gc(void)
{ {
static DECLARE_MUTEX(unix_gc_sem); static DEFINE_MUTEX(unix_gc_sem);
int i; int i;
struct sock *s; struct sock *s;
struct sk_buff_head hitlist; struct sk_buff_head hitlist;
...@@ -179,7 +180,7 @@ void unix_gc(void) ...@@ -179,7 +180,7 @@ void unix_gc(void)
* Avoid a recursive GC. * Avoid a recursive GC.
*/ */
if (down_trylock(&unix_gc_sem)) if (!mutex_trylock(&unix_gc_sem))
return; return;
spin_lock(&unix_table_lock); spin_lock(&unix_table_lock);
...@@ -308,5 +309,5 @@ void unix_gc(void) ...@@ -308,5 +309,5 @@ void unix_gc(void)
*/ */
__skb_queue_purge(&hitlist); __skb_queue_purge(&hitlist);
up(&unix_gc_sem); mutex_unlock(&unix_gc_sem);
} }
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
#include <net/xfrm.h> #include <net/xfrm.h>
#include <net/ip.h> #include <net/ip.h>
DECLARE_MUTEX(xfrm_cfg_sem); DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_sem); EXPORT_SYMBOL(xfrm_cfg_mutex);
static DEFINE_RWLOCK(xfrm_policy_lock); static DEFINE_RWLOCK(xfrm_policy_lock);
......
...@@ -1486,9 +1486,9 @@ static void xfrm_netlink_rcv(struct sock *sk, int len) ...@@ -1486,9 +1486,9 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
unsigned int qlen = 0; unsigned int qlen = 0;
do { do {
down(&xfrm_cfg_sem); mutex_lock(&xfrm_cfg_mutex);
netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg); netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
up(&xfrm_cfg_sem); mutex_unlock(&xfrm_cfg_mutex);
} while (qlen); } while (qlen);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment