Commit 9bb36777 authored by Justin Tee, committed by Martin K. Petersen

scsi: lpfc: Protect vport fc_nodes list with an explicit spin lock

In an attempt to reduce the number of unnecessary shost_lock acquisitions
in the lpfc driver, replace shost_lock with an explicit fc_nodes_list_lock
spinlock when accessing vport->fc_nodes lists.  Although the vport memory
region is owned by shost->hostdata, it is driver-private memory, so an
explicit fc_nodes list lock for fc_nodes list mutations is more appropriate
than locking the entire shost.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240131185112.149731-14-justintee8345@gmail.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0dfd9cbc
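For context, a minimal sketch of the locking pattern this change adopts: a
driver-private spinlock, taken with spin_lock_irqsave(), guards addition,
removal, and traversal of the per-vport fc_nodes list instead of
shost->host_lock.  The structure and function names below (example_vport,
example_node, example_set_qdepth) are simplified placeholders for
illustration only, not the actual lpfc code.

/*
 * Sketch of a narrow, driver-private list lock.  Only the fields needed
 * to show the pattern are included; everything else is hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_node {
	struct list_head nlp_listp;	/* linkage on the vport's fc_nodes list */
	u32 cmd_qdepth;
};

struct example_vport {
	struct list_head fc_nodes;	/* remote node entries */
	spinlock_t fc_nodes_list_lock;	/* protects fc_nodes only */
};

static void example_vport_init(struct example_vport *vport)
{
	INIT_LIST_HEAD(&vport->fc_nodes);
	spin_lock_init(&vport->fc_nodes_list_lock);
}

/* Walk the list under the narrow lock; usable from any context. */
static void example_set_qdepth(struct example_vport *vport, u32 qdepth)
{
	struct example_node *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
		ndlp->cmd_qdepth = qdepth;
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}

Because the lock covers only list membership, callers that must sleep (for
example around mempool_alloc) drop it, do the blocking work, and re-acquire
it, as the lpfc_unreg_hba_rpis hunk below does.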
@@ -587,6 +587,7 @@ struct lpfc_vport {
 #define FC_CT_RPRT_DEFER	0x20	/* Defer issuing FDMI RPRT */
 	struct list_head fc_nodes;
+	spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
 	/* Keep counters for the number of entries in each list. */
 	atomic_t fc_plogi_cnt;
...
@@ -344,6 +344,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_fc4_ctrl_stat *cstat;
 	uint64_t data1, data2, data3;
 	uint64_t totin, totout, tot;
+	unsigned long iflags;
 	char *statep;
 	int i;
 	int len = 0;
@@ -543,7 +544,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
 			goto buffer_done;
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		nrport = NULL;
@@ -617,7 +618,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
 				goto unlock_buf_done;
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	if (!lport)
 		goto buffer_done;
@@ -681,7 +682,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		goto buffer_done;
 unlock_buf_done:
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 buffer_done:
 	len = strnlen(buf, PAGE_SIZE);
@@ -3765,15 +3766,14 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
 static void
 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 {
-	struct Scsi_Host *shost;
 	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	struct lpfc_nvme_rport *rport;
 	struct nvme_fc_remote_port *remoteport = NULL;
 #endif
-	shost = lpfc_shost_from_vport(vport);
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (ndlp->rport)
 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
@@ -3788,7 +3788,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 					       vport->cfg_devloss_tmo);
 #endif
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 }
@@ -3974,8 +3974,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
 static int
 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
 	if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
 		return -EINVAL;
@@ -3983,14 +3983,13 @@ lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
 	if (val == vport->cfg_tgt_queue_depth)
 		return 0;
-	spin_lock_irq(shost->host_lock);
 	vport->cfg_tgt_queue_depth = val;
 	/* Next loop thru nodelist and change cmd_qdepth */
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
-	spin_unlock_irq(shost->host_lock);
 	return 0;
 }
@@ -5236,8 +5235,8 @@ lpfc_vport_param_show(max_scsicmpl_time);
 static int
 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp, *next_ndlp;
+	unsigned long iflags;
 	if (val == vport->cfg_max_scsicmpl_time)
 		return 0;
@@ -5245,13 +5244,13 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
 		return -EINVAL;
 	vport->cfg_max_scsicmpl_time = val;
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 			continue;
 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	return 0;
 }
 lpfc_vport_param_store(max_scsicmpl_time);
@@ -6853,17 +6852,19 @@ lpfc_get_node_by_target(struct scsi_target *starget)
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	/* Search for this, mapped, target ID */
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
 		    starget->id == ndlp->nlp_sid) {
-			spin_unlock_irq(shost->host_lock);
+			spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+					       iflags);
 			return ndlp;
 		}
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	return NULL;
 }
...
@@ -1853,11 +1853,10 @@ static uint32_t
 lpfc_find_map_node(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp, *next_ndlp;
-	struct Scsi_Host *shost;
+	unsigned long iflags;
 	uint32_t cnt = 0;
-	shost = lpfc_shost_from_vport(vport);
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 		if (ndlp->nlp_type & NLP_FABRIC)
 			continue;
@@ -1865,7 +1864,7 @@ lpfc_find_map_node(struct lpfc_vport *vport)
 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
 			cnt++;
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	return cnt;
 }
...
@@ -806,10 +806,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 {
 	int len = 0;
 	int i, iocnt, outio, cnt;
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp;
 	unsigned char *statep;
+	unsigned long iflags;
 	struct nvme_fc_local_port *localport;
 	struct nvme_fc_remote_port *nrport = NULL;
 	struct lpfc_nvme_rport *rport;
@@ -818,7 +818,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	outio = 0;
 	len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		iocnt = 0;
 		if (!cnt) {
@@ -908,7 +908,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 					ndlp->nlp_defer_did);
 		len += scnprintf(buf+len, size-len, "\n");
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	len += scnprintf(buf + len, size - len,
 			"\nOutstanding IO x%x\n", outio);
@@ -940,8 +940,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	if (!localport)
 		goto out_exit;
-	spin_lock_irq(shost->host_lock);
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
 		statep = "ONLINE";
@@ -953,6 +951,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			localport->port_id, statep);
 	len += scnprintf(buf + len, size - len, "\tRport List:\n");
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
 		spin_lock(&ndlp->lock);
@@ -1006,8 +1005,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		/* Terminate the string. */
 		len += scnprintf(buf + len, size - len, "\n");
 	}
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
-	spin_unlock_irq(shost->host_lock);
 out_exit:
 	return len;
 }
...
@@ -4860,10 +4860,10 @@ void
 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		   int state)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	int old_state = ndlp->nlp_state;
 	int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
 	char name1[16], name2[16];
+	unsigned long iflags;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 			 "0904 NPort state transition x%06x, %s -> %s\n",
@@ -4890,9 +4890,9 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	}
 	if (list_empty(&ndlp->nlp_listp)) {
-		spin_lock_irq(shost->host_lock);
+		spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	} else if (old_state)
 		lpfc_nlp_counters(vport, old_state, -1);
@@ -4904,26 +4904,26 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 void
 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	unsigned long iflags;
 	if (list_empty(&ndlp->nlp_listp)) {
-		spin_lock_irq(shost->host_lock);
+		spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	}
 }
 void
 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	unsigned long iflags;
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_del_init(&ndlp->nlp_listp);
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
 				NLP_STE_UNUSED_NODE);
 }
@@ -5421,8 +5421,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host *shost;
 	int i;
+	unsigned long iflags;
 	vports = lpfc_create_vport_work_array(phba);
 	if (!vports) {
@@ -5431,17 +5431,18 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
 		return;
 	}
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-		shost = lpfc_shost_from_vport(vports[i]);
-		spin_lock_irq(shost->host_lock);
+		spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
 			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 				/* The mempool_alloc might sleep */
-				spin_unlock_irq(shost->host_lock);
+				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
						       iflags);
 				lpfc_unreg_rpi(vports[i], ndlp);
-				spin_lock_irq(shost->host_lock);
+				spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
						  iflags);
 			}
 		}
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
 }
@@ -5683,12 +5684,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 struct lpfc_nodelist *
 lpfc_findnode_mapped(struct lpfc_vport *vport)
 {
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp;
 	uint32_t data1;
 	unsigned long iflags;
-	spin_lock_irqsave(shost->host_lock, iflags);
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -5697,7 +5697,8 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
 				 ((uint32_t)ndlp->nlp_xri << 16) |
 				 ((uint32_t)ndlp->nlp_type << 8) |
 				 ((uint32_t)ndlp->nlp_rpi & 0xff));
-			spin_unlock_irqrestore(shost->host_lock, iflags);
+			spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
					       iflags);
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
 					 "2025 FIND node DID MAPPED "
 					 "Data: x%px x%x x%x x%x x%px\n",
@@ -5707,7 +5708,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
 			return ndlp;
 		}
 	}
-	spin_unlock_irqrestore(shost->host_lock, iflags);
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
 	/* FIND node did <did> NOT FOUND */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -6742,7 +6743,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	int i, ret = 0;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host *shost;
+	unsigned long iflags;
 	vports = lpfc_create_vport_work_array(phba);
@@ -6751,8 +6752,6 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 		return 1;
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-		shost = lpfc_shost_from_vport(vports[i]);
-		spin_lock_irq(shost->host_lock);
 		/*
 		 * IF the CVL_RCVD bit is not set then we have sent the
 		 * flogi.
@@ -6760,15 +6759,16 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 		 * unreg the fcf.
 		 */
 		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
-			spin_unlock_irq(shost->host_lock);
 			ret = 1;
 			goto out;
 		}
+		spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
 			if (ndlp->rport &&
 			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
 				ret = 1;
-				spin_unlock_irq(shost->host_lock);
+				spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
						       iflags);
 				goto out;
 			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 				ret = 1;
@@ -6780,7 +6780,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 					 ndlp->nlp_flag);
 			}
 		}
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
 	}
 out:
 	lpfc_destroy_vport_work_array(phba, vports);
...
@@ -3829,7 +3829,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
 			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
 			spin_unlock_irq(shost->host_lock);
-			shost = lpfc_shost_from_vport(vports[i]);
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vports[i]->fc_nodes,
 						 nlp_listp) {
@@ -4833,6 +4832,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	/* Initialize all internally managed lists. */
 	INIT_LIST_HEAD(&vport->fc_nodes);
+	spin_lock_init(&vport->fc_nodes_list_lock);
 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
 	spin_lock_init(&vport->work_port_lock);
...