Commit 2ca32b48 authored by Tejun Heo, committed by James Bottomley

[SCSI] fcoe: use dedicated workqueue instead of system_wq

fcoe uses the system_wq to destroy ports and the work items need to be
flushed before the driver is unloaded.  As the work items free the
containing data structure, they can't be flushed directly.  The
workqueue should be flushed instead.

Also, the destroy work items can be chained - i.e. destruction of a port
may lead to destruction of another port, with the work item for the
former queueing the work item for the latter.  Currently the chain can
be at most two deep, and fcoe_exit() makes sure everything is complete
by calling flush_scheduled_work() twice.

With commit c8efcc25 (workqueue: allow chained queueing during
destruction), destroy_workqueue() can take care of chained work items on
workqueue destruction.  Add a dedicated fcoe_wq and use it instead.
Simply destroying fcoe_wq on driver unload then takes care of the flushing.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 75a2792d
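For context, the pattern the patch moves to looks roughly like the sketch below: a driver-private workqueue is allocated at init, destroy work items (which free their own containing structure and may queue further destroys) go on that workqueue, and destroy_workqueue() at unload both flushes any chained work and frees the queue. This is a minimal illustrative sketch under assumptions, not fcoe code; the demo_wq/demo_port/demo_destroy_work names and the child-port chaining detail are hypothetical, only the alloc_workqueue/queue_work/destroy_workqueue usage mirrors the patch.

/*
 * Minimal sketch (hypothetical names, not fcoe code): a driver-private
 * workqueue whose work items free their own container, so only the
 * workqueue itself can be flushed safely.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

struct demo_port {
	struct work_struct destroy_work;	/* INIT_WORK() when the port is created */
	struct demo_port *child;		/* may trigger a chained destroy */
};

static void demo_destroy_work(struct work_struct *work)
{
	struct demo_port *port =
		container_of(work, struct demo_port, destroy_work);

	/*
	 * Chained queueing: destroying this port may queue another destroy
	 * on the same workqueue, which destroy_workqueue() tolerates since
	 * commit c8efcc25.
	 */
	if (port->child)
		queue_work(demo_wq, &port->child->destroy_work);

	kfree(port);	/* the work item frees its own container */
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Flushes pending and chained destroy work, then frees the queue;
	 * no flush_scheduled_work() calls are needed.
	 */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");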
@@ -31,6 +31,7 @@
 #include <linux/fs.h>
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
+#include <linux/workqueue.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
 DEFINE_MUTEX(fcoe_config_mutex);
 
+static struct workqueue_struct *fcoe_wq;
+
 /* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
 static DECLARE_COMPLETION(fcoe_flush_completion);
@@ -1896,7 +1899,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		list_del(&fcoe->list);
 		port = lport_priv(fcoe->ctlr.lp);
 		fcoe_interface_cleanup(fcoe);
-		schedule_work(&port->destroy_work);
+		queue_work(fcoe_wq, &port->destroy_work);
 		goto out;
 		break;
 	case NETDEV_FEAT_CHANGE:
@@ -2387,6 +2390,10 @@ static int __init fcoe_init(void)
 	unsigned int cpu;
 	int rc = 0;
 
+	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+	if (!fcoe_wq)
+		return -ENOMEM;
+
 	/* register as a fcoe transport */
 	rc = fcoe_transport_attach(&fcoe_sw_transport);
 	if (rc) {
@@ -2425,6 +2432,7 @@ static int __init fcoe_init(void)
 		fcoe_percpu_thread_destroy(cpu);
 	}
 	mutex_unlock(&fcoe_config_mutex);
+	destroy_workqueue(fcoe_wq);
 	return rc;
 }
 module_init(fcoe_init);
@@ -2450,7 +2458,7 @@ static void __exit fcoe_exit(void)
 		list_del(&fcoe->list);
 		port = lport_priv(fcoe->ctlr.lp);
 		fcoe_interface_cleanup(fcoe);
-		schedule_work(&port->destroy_work);
+		queue_work(fcoe_wq, &port->destroy_work);
 	}
 	rtnl_unlock();
@@ -2461,15 +2469,17 @@ static void __exit fcoe_exit(void)
 	mutex_unlock(&fcoe_config_mutex);
 
-	/* flush any asyncronous interface destroys,
-	 * this should happen after the netdev notifier is unregistered */
-	flush_scheduled_work();
-	/* That will flush out all the N_Ports on the hostlist, but now we
-	 * may have NPIV VN_Ports scheduled for destruction */
-	flush_scheduled_work();
+	/*
+	 * destroy_work's may be chained but destroy_workqueue()
+	 * can take care of them.  Just kill the fcoe_wq.
+	 */
+	destroy_workqueue(fcoe_wq);
 
-	/* detach from scsi transport
-	 * must happen after all destroys are done, therefor after the flush */
+	/*
+	 * Detaching from the scsi transport must happen after all
+	 * destroys are done on the fcoe_wq.  destroy_workqueue will
+	 * ensure the fcoe_wq is flushed.
+	 */
 	fcoe_if_exit();
 
 	/* detach from fcoe transport */
@@ -2618,7 +2628,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
 	mutex_lock(&n_port->lp_mutex);
 	list_del(&vn_port->list);
 	mutex_unlock(&n_port->lp_mutex);
-	schedule_work(&port->destroy_work);
+	queue_work(fcoe_wq, &port->destroy_work);
 	return 0;
 }