Commit 7fc1ece4 authored by Michael Chan, committed by David S. Miller

cnic: Fix locking in init/exit calls.

The slow path ulp_init and ulp_exit calls to the bnx2i driver
are sleepable and therefore should not be protected using
rcu_read_lock.  Fix it by using a mutex and refcount during these
calls.  cnic_unregister_driver() will now wait for the refcount
to go to zero before completing the call.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Reviewed-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 681dbd71
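
For context before the diff: the change moves the ulp_init/ulp_exit paths from rcu_read_lock protection (which forbids sleeping) to a mutex-plus-refcount scheme. The fragment below is a minimal, self-contained sketch of that pattern using made-up names (example_ops, example_call, example_unregister); it illustrates the approach and is not the driver code itself.

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/mutex.h>

struct example_ops {
        atomic_t ref_count;
        void (*sleepable_cb)(void *ctx);        /* may sleep, like cnic_init/cnic_exit */
};

static DEFINE_MUTEX(example_lock);
static struct example_ops *example_tbl;         /* registration slot, protected by example_lock */

static void example_call(void *ctx)
{
        struct example_ops *ops;

        mutex_lock(&example_lock);
        ops = example_tbl;
        if (!ops || !ops->sleepable_cb) {
                mutex_unlock(&example_lock);
                return;
        }
        atomic_inc(&ops->ref_count);            /* like ulp_get() */
        mutex_unlock(&example_lock);

        ops->sleepable_cb(ctx);                 /* sleepable call, no lock held */

        atomic_dec(&ops->ref_count);            /* like ulp_put() */
}

static void example_unregister(void)
{
        struct example_ops *ops;
        int i = 0;

        mutex_lock(&example_lock);
        ops = example_tbl;
        example_tbl = NULL;
        mutex_unlock(&example_lock);

        if (!ops)
                return;

        /* Bounded wait (up to ~2 s) for in-flight callbacks to drop their references. */
        while (atomic_read(&ops->ref_count) != 0 && i++ < 20)
                msleep(100);
}

The bounded 20 x 100 ms poll mirrors what the patch adds to cnic_unregister_driver(); the real code also keeps synchronize_rcu(), since the table entry is still published with rcu_assign_pointer().
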
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 	return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 	}
 	read_unlock(&cnic_dev_lock);
 
+	atomic_set(&ulp_ops->ref_count, 0);
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 	mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 int cnic_unregister_driver(int ulp_type)
 {
 	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	if (!ulp_ops) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 		       "been registered\n", ulp_type);
 		goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
 
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+		       " to zero.\n", dev->netdev->name);
 	return 0;
 
 out_unlock:
@@ -1161,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_init(dev);
+
+		ulp_put(ulp_ops);
 	}
-
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1181,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_exit(dev);
+
+		ulp_put(ulp_ops);
 	}
-
-	rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)
...
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
 	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
 				  char *data, u16 data_size);
 	struct module *owner;
+	atomic_t ref_count;
 };
 
 extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
...