Commit 4ae1e19f authored by Vasu Dev, committed by James Bottomley

[SCSI] libfc: fix an issue of pending exch/es after i/f destroyed or rmmod fcoe

All exchanges must be freed before their EM mempool is destroyed, but
currently some exchanges can still be pending in their scheduled delayed
work after the EM mempool has been destroyed, causing the issue discussed
and reported in this email thread:

 http://www.open-fcoe.org/pipermail/devel/2009-October/004788.html

This patch fixes the issue by adding a dedicated work queue thread,
fc_exch_workqueue, for exchange delayed work, and then flushing this
work queue before destroying the EM mempool.
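For illustration, here is a minimal, self-contained sketch of that pattern
with hypothetical demo_* names rather than the actual libfc identifiers:
delayed work items live in a kmem cache and are queued on a private
single-threaded workqueue, so teardown can flush that queue before the
cache backing the work items is destroyed.

/*
 * Hedged sketch only (hypothetical demo_* names, not the libfc code):
 * a dedicated workqueue for delayed work, flushed before the cache
 * holding the work items goes away.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

static struct workqueue_struct *demo_wq;	/* dedicated queue for timers */
static struct kmem_cache *demo_cachep;		/* cache the work items live in */

struct demo_exch {
	struct delayed_work timeout_work;
};

static void demo_timeout(struct work_struct *work)
{
	struct demo_exch *ep =
		container_of(work, struct demo_exch, timeout_work.work);

	/* handle the timeout, then release the object */
	kmem_cache_free(demo_cachep, ep);
}

static struct demo_exch *demo_alloc(void)
{
	struct demo_exch *ep = kmem_cache_zalloc(demo_cachep, GFP_ATOMIC);

	if (ep)
		INIT_DELAYED_WORK(&ep->timeout_work, demo_timeout);
	return ep;
}

static void demo_arm_timer(struct demo_exch *ep, unsigned int timer_msec)
{
	/* queue on the private workqueue, not the system-wide one */
	queue_delayed_work(demo_wq, &ep->timeout_work,
			   msecs_to_jiffies(timer_msec));
}

static int demo_setup(void)
{
	demo_cachep = kmem_cache_create("demo_exch", sizeof(struct demo_exch),
					0, 0, NULL);
	if (!demo_cachep)
		return -ENOMEM;

	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq) {
		kmem_cache_destroy(demo_cachep);
		return -ENOMEM;
	}
	return 0;
}

static void demo_teardown(void)
{
	/* drain every pending timeout before freeing the memory it uses */
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
	kmem_cache_destroy(demo_cachep);
}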

cancel_delayed_work_sync() cannot be called during the final
fc_exch_reset() because of the lport and exch lock ordering, so the
related comment block, which is no longer relevant with this patch, is
removed.
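Continuing the hypothetical demo_* sketch above (and assuming struct
demo_exch also carries a spinlock_t ex_lock and an atomic_t refcnt, plus
<linux/spinlock.h> and <linux/atomic.h>), this is roughly why the reset
path keeps the non-blocking cancel: cancel_delayed_work_sync() would wait
for a running timeout handler that may need locks the caller already
holds, which could deadlock, while cancel_delayed_work() only removes
work that has not started yet.

/* Sketch only; assumes demo_exch gained spinlock_t ex_lock and atomic_t refcnt. */
static void demo_reset(struct demo_exch *ep)
{
	spin_lock_bh(&ep->ex_lock);
	/*
	 * cancel_delayed_work_sync() would block until a running timeout
	 * handler finishes, but that handler may take locks the callers of
	 * this reset path already hold, so it could deadlock.  The
	 * non-blocking cancel succeeds only if the work has not started;
	 * in that case the reference held for the timer is dropped here,
	 * otherwise the handler remains responsible for dropping it.
	 */
	if (cancel_delayed_work(&ep->timeout_work))
		atomic_dec(&ep->refcnt);	/* drop hold for timer */
	spin_unlock_bh(&ep->ex_lock);
}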
Reported-by: Joe Eykholt <jeykholt@cisco.com>
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 18fa11ef
@@ -38,6 +38,7 @@ u16 fc_cpu_mask;	/* cpu mask for possible cpus */
 EXPORT_SYMBOL(fc_cpu_mask);
 static u16	fc_cpu_order;	/* 2's power to represent total possible cpus */
 static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
+struct workqueue_struct *fc_exch_workqueue;
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
@@ -427,7 +428,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 
 	FC_EXCH_DBG(ep, "Exchange timer armed\n");
 
-	if (schedule_delayed_work(&ep->timeout_work,
+	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
 				  msecs_to_jiffies(timer_msec)))
 		fc_exch_hold(ep);		/* hold for timer */
 }
@@ -1619,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep)
 
 	spin_lock_bh(&ep->ex_lock);
 	ep->state |= FC_EX_RST_CLEANUP;
-	/*
-	 * we really want to call del_timer_sync, but cannot due
-	 * to the lport calling with the lport lock held (some resp
-	 * functions can also grab the lport lock which could cause
-	 * a deadlock).
-	 */
 	if (cancel_delayed_work(&ep->timeout_work))
 		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
 	resp = ep->resp;
@@ -2203,6 +2198,7 @@ void fc_exch_mgr_free(struct fc_lport *lport)
 {
 	struct fc_exch_mgr_anchor *ema, *next;
 
+	flush_workqueue(fc_exch_workqueue);
 	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
 		fc_exch_mgr_del(ema);
 }
@@ -2338,6 +2334,9 @@ int fc_setup_exch_mgr()
 	}
 	fc_cpu_mask--;
 
+	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+	if (!fc_exch_workqueue)
+		return -ENOMEM;
 	return 0;
 }
@@ -2346,5 +2345,6 @@ int fc_setup_exch_mgr()
  */
 void fc_destroy_exch_mgr()
 {
+	destroy_workqueue(fc_exch_workqueue);
 	kmem_cache_destroy(fc_em_cachep);
 }