Commit 92d7f7b0 authored by James Smart, committed by James Bottomley

[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3

NPIV support is added to the driver.  It uses the FC transport's
interfaces for the creation and deletion of vports.  Within the
driver, a new Scsi_Host is created for each NPIV instance and is
paired with a new FC port instance.  This allows N FC port elements
to share a single adapter.
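
As a rough sketch of that flow (a simplified illustration, not the literal driver code: the struct fc_vport fields and VPORT_* return codes are assumed from the scsi_transport_fc vport API of this kernel series, while lpfc_create_port(), lpfc_get_instance(), LPFC_SLI3_NPIV_ENABLED and the template hooks all appear later in this patch; error handling, the 'disable' flag, and the FDISC/REG_VPI steps are elided):

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>
    /* lpfc driver-private headers (lpfc.h, lpfc_crtn.h, ...) assumed */

    /* Entry points advertised to the FC transport (excerpt of the
     * lpfc_transport_functions additions in this patch).
     */
    struct fc_function_template lpfc_transport_functions = {
            /* ... existing host/rport attributes ... */
            .vport_create    = lpfc_vport_create,
            .vport_delete    = lpfc_vport_delete,
            .dd_fcvport_size = sizeof(struct lpfc_vport *),
    };

    /* Simplified create path: one new Scsi_Host per NPIV instance,
     * bound to the fc_vport object handed in by the transport.
     */
    int
    lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
    {
            struct Scsi_Host  *shost = fc_vport->shost;  /* physical port's host */
            struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
            struct lpfc_hba   *phba  = pport->phba;
            struct lpfc_vport *vport;

            if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
                    return VPORT_ERROR;             /* NPIV requires SLI-3 */

            /* Allocate the new Scsi_Host / lpfc_vport pair on this adapter */
            vport = lpfc_create_port(phba, lpfc_get_instance(), fc_vport);
            if (!vport)
                    return VPORT_NORESOURCES;

            vport->fc_vport = fc_vport;
            *(struct lpfc_vport **) fc_vport->dd_data = vport;

            /* FDISC and REG_VPI then bring the new vport online */
            return VPORT_OK;
    }

Because each vport gets its own Scsi_Host, the existing per-host SCSI scanning and sysfs plumbing works unchanged for NPIV instances, which is what lets N FC port elements share one adapter.
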
Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent ed957684
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
# * Copyright (C) 2004-2005 Emulex. All rights reserved. *
# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
......@@ -27,4 +27,5 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
lpfc_vport.o
......@@ -34,6 +34,17 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
/*
* The following time intervals are used for adjusting SCSI device
* queue depths when there are driver resource errors or Firmware
* resource errors.
*/
#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
......@@ -97,6 +108,29 @@ typedef struct lpfc_vpd {
uint32_t sli2FwRev;
uint8_t sli2FwName[16];
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2 :24; /* Reserved */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t chbs : 1; /* Configure Host Backing store */
uint32_t cinb : 1; /* Enable Interrupt Notification Block */
uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
uint32_t cmx : 1; /* Configure Max XRIs */
uint32_t cmr : 1; /* Configure Max RPIs */
#else /* __LITTLE_ENDIAN */
uint32_t cmr : 1; /* Configure Max RPIs */
uint32_t cmx : 1; /* Configure Max XRIs */
uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
uint32_t cinb : 1; /* Enable Interrupt Notification Block */
uint32_t chbs : 1; /* Configure Host Backing store */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t rsvd2 :24; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
struct lpfc_scsi_buf;
......@@ -129,6 +163,7 @@ struct lpfc_stats {
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
uint32_t elsXmitPRLI;
uint32_t elsXmitADISC;
......@@ -174,18 +209,21 @@ struct lpfc_sysfs_mbox {
struct lpfc_hba;
enum discovery_state {
LPFC_STATE_UNKNOWN = 0, /* HBA state is unknown */
LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
LPFC_FABRIC_CFG_LINK = 8, /* Fabric assigned NPORT Id
* configured */
LPFC_NS_REG = 9, /* Register with NameServer */
LPFC_NS_QRY = 10, /* Query NameServer for NPort ID list */
LPFC_BUILD_DISC_LIST = 11, /* Build ADISC and PLOGI lists for
* device authentication / discovery */
LPFC_DISC_AUTH = 12, /* Processing ADISC list */
LPFC_VPORT_READY = 32,
LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
LPFC_VPORT_FAILED = 1, /* vport has failed */
LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
LPFC_FDISC = 8, /* FDISC sent for vport */
LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
* configured */
LPFC_NS_REG = 10, /* Register with NameServer */
LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
* device authentication / discovery */
LPFC_DISC_AUTH = 13, /* Processing ADISC list */
LPFC_VPORT_READY = 32,
};
enum hba_state {
......@@ -195,8 +233,9 @@ enum hba_state {
LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
LPFC_CLEAR_LA = 13, /* authentication cmplt - issue
LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
* CLEAR_LA */
LPFC_HBA_READY = 32,
LPFC_HBA_ERROR = -1
};
......@@ -209,26 +248,30 @@ struct lpfc_vport {
#define LPFC_FABRIC_PORT 3
enum discovery_state port_state;
uint16_t vpi;
uint32_t fc_flag; /* FC flags */
/* Several of these flags are HBA centric and should be moved to
* phba->link_flag (e.g. FC_PT2PT, FC_PUBLIC_LOOP)
*/
#define FC_PT2PT 0x1 /* pt2pt with no fabric */
#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
#define FC_DISC_TMO 0x4 /* Discovery timer running */
#define FC_PUBLIC_LOOP 0x8 /* Public loop */
#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */
#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
#define FC_PT2PT 0x1 /* pt2pt with no fabric */
#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
#define FC_DISC_TMO 0x4 /* Discovery timer running */
#define FC_PUBLIC_LOOP 0x8 /* Public loop */
#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */
#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
struct list_head fc_nodes;
......@@ -269,6 +312,9 @@ struct lpfc_vport {
#define WORKER_ELS_TMO 0x2 /* ELS timeout */
#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
#define WORKER_FABRIC_BLOCK_TMO 0x10 /* fabric block timeout */
#define WORKER_RAMP_DOWN_QUEUE 0x20 /* Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE 0x40 /* Increase Q depth */
struct timer_list fc_fdmitmo;
struct timer_list els_tmofunc;
......@@ -278,10 +324,10 @@ struct lpfc_vport {
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
char *vname; /* Application assigned name */
struct fc_vport *fc_vport;
};
struct hbq_s {
uint16_t entry_count; /* Current number of HBQ slots */
uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
......@@ -289,33 +335,38 @@ struct hbq_s {
uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
};
#define MAX_HBQS 16
#define LPFC_MAX_HBQS 16
/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ 0
struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
#define LPFC_SLI3_ENABLED 0x01
#define LPFC_SLI3_HBQ_ENABLED 0x02
#define LPFC_SLI3_INB_ENABLED 0x04
#define LPFC_SLI3_ENABLED 0x01
#define LPFC_SLI3_HBQ_ENABLED 0x02
#define LPFC_SLI3_NPIV_ENABLED 0x04
#define LPFC_SLI3_VPORT_TEARDOWN 0x08
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
enum hba_state link_state;
uint32_t link_flag; /* link state flags */
#define LS_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
/* This flag is set while issuing */
/* INIT_LINK mailbox command */
#define LS_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
struct lpfc_sli2_slim *slim2p;
struct lpfc_dmabuf hbqslimp;
dma_addr_t slim2p_mapping;
uint16_t pci_cfg_value;
uint8_t work_found;
#define LPFC_MAX_WORKER_ITERATION 4
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
......@@ -325,7 +376,7 @@ struct lpfc_hba {
struct timer_list fc_estabtmo; /* link establishment timer */
/* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */
uint8_t fc_pref_ALPA; /* preferred AL_PA */
uint8_t fc_pref_ALPA; /* preferred AL_PA */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */
......@@ -355,6 +406,8 @@ struct lpfc_hba {
uint32_t cfg_nodev_tmo;
uint32_t cfg_devloss_tmo;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_peer_port_login;
uint32_t cfg_vport_restrict_login;
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_ack0;
......@@ -391,11 +444,9 @@ struct lpfc_hba {
wait_queue_head_t *work_wait;
struct task_struct *worker_thread;
struct hbq_dmabuf *hbq_buffer_pool;
uint32_t hbq_buffer_count;
uint32_t hbq_buff_count; /* Current hbq buffers */
struct list_head hbq_buffer_list;
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[MAX_HBQS]; /* local copy of hbq indicies */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
......@@ -413,7 +464,7 @@ struct lpfc_hba {
struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
uint32_t __iomem *hbq_get; /* Address in SLIM to HBQ get ptrs */
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
......@@ -464,6 +515,22 @@ struct lpfc_hba {
struct fc_host_statistics link_stats;
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
uint16_t vpi_cnt; /* Nport count */
#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
unsigned long *vpi_bmask; /* vpi allocation table */
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
unsigned long last_ramp_up_time;
};
static inline struct Scsi_Host *
......@@ -485,10 +552,9 @@ static inline int
lpfc_is_link_up(struct lpfc_hba *phba)
{
return phba->link_state == LPFC_LINK_UP ||
phba->link_state == LPFC_CLEAR_LA;
phba->link_state == LPFC_CLEAR_LA ||
phba->link_state == LPFC_HBA_READY;
}
#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
......@@ -39,6 +39,7 @@
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
......@@ -139,7 +140,7 @@ lpfc_fwrev_show(struct class_device *cdev, char *buf)
char fwrev[32];
lpfc_decode_firmware_rev(phba, fwrev, 1);
return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
}
static ssize_t
......@@ -178,10 +179,11 @@ lpfc_state_show(struct class_device *cdev, char *buf)
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Down");
len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
switch (vport->port_state) {
......@@ -190,8 +192,9 @@ lpfc_state_show(struct class_device *cdev, char *buf)
break;
case LPFC_LOCAL_CFG_LINK:
len += snprintf(buf + len, PAGE_SIZE-len,
"configuring\n");
"Configuring Link\n");
break;
case LPFC_FDISC:
case LPFC_FLOGI:
case LPFC_FABRIC_CFG_LINK:
case LPFC_NS_REG:
......@@ -205,7 +208,11 @@ lpfc_state_show(struct class_device *cdev, char *buf)
len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
break;
case LPFC_STATE_UNKNOWN:
case LPFC_VPORT_FAILED:
len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
break;
case LPFC_VPORT_UNKNOWN:
len += snprintf(buf + len, PAGE_SIZE - len,
"Unknown\n");
break;
......@@ -432,6 +439,151 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
return -EIO;
}
static ssize_t
lpfc_max_vpi_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->max_vpi);
}
static ssize_t
lpfc_used_vpi_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
/* Don't count the physical port */
return snprintf(buf, PAGE_SIZE, "%d\n", phba->vpi_cnt-1);
}
int
lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mxri,
uint32_t *axri, uint32_t *mrpi, uint32_t *arpi)
{
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
/*
* prevent udev from issuing mailbox commands until the port is
* configured.
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
return 0;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return 0;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb = &pmboxq->mb;
pmb->mbxCommand = MBX_READ_CONFIG;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
if (rc == MBX_TIMEOUT)
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
else
mempool_free(pmboxq, phba->mbox_mem_pool);
return 0;
}
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
if (arpi)
*arpi = pmb->un.varRdConfig.avail_rpi;
if (mxri)
*mxri = pmb->un.varRdConfig.max_xri;
if (axri)
*axri = pmb->un.varRdConfig.avail_xri;
mempool_free(pmboxq, phba->mbox_mem_pool);
return 1;
}
static ssize_t
lpfc_max_rpi_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
static ssize_t
lpfc_used_rpi_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
static ssize_t
lpfc_max_xri_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt;
if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
static ssize_t
lpfc_used_xri_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
uint32_t cnt, acnt;
if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL))
return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
static ssize_t
lpfc_npiv_info_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (!(phba->max_vpi))
return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
if (vport->port_type == LPFC_PHYSICAL_PORT)
return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
static ssize_t
lpfc_poll_show(struct class_device *cdev, char *buf)
{
......@@ -640,6 +792,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
......@@ -829,6 +988,17 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
module_param(lpfc_sli_mode, int, 0);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_npiv_enable = 0;
module_param(lpfc_npiv_enable, int, 0);
MODULE_PARM_DESC(lpfc_npiv_enable, "Enable NPIV functionality");
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
......@@ -984,6 +1154,33 @@ LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
"Max number of FCP commands we can queue to a lpfc HBA");
/*
# peer_port_login: This parameter allows/prevents logins
# between peer ports hosted on the same physical port.
# When this parameter is set to 0, peer ports on the same physical port
# are not allowed to log in to each other.
# When this parameter is set to 1, peer ports on the same physical port
# are allowed to log in to each other.
# The default value of this parameter is 0.
*/
LPFC_ATTR_R(peer_port_login, 0, 0, 1,
"Allow peer ports on the same physical port to login to each "
"other.");
/*
# vport_restrict_login: This parameter allows/prevents logins
# between Virtual Ports and remote initiators.
# When this parameter is not set (0), Virtual Ports will accept PLOGIs from
# other initiators and will attempt to PLOGI to all remote ports.
# When this parameter is set (1), Virtual Ports will reject PLOGIs from
# remote ports and will not attempt to PLOGI to other initiators.
# This parameter does not apply to the physical port.
# This parameter does not restrict logins to Fabric-resident remote ports.
# The default value of this parameter is 1.
*/
LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
"Restrict virtual ports login to remote initiators.");
/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
......@@ -1127,6 +1324,7 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_info,
&class_device_attr_serialnum,
......@@ -1143,6 +1341,8 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_lpfc_log_verbose,
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_hba_queue_depth,
&class_device_attr_lpfc_peer_port_login,
&class_device_attr_lpfc_vport_restrict_login,
&class_device_attr_lpfc_nodev_tmo,
&class_device_attr_lpfc_devloss_tmo,
&class_device_attr_lpfc_fcp_class,
......@@ -1161,6 +1361,13 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
&class_device_attr_board_mode,
&class_device_attr_max_vpi,
&class_device_attr_used_vpi,
&class_device_attr_max_rpi,
&class_device_attr_used_rpi,
&class_device_attr_max_xri,
&class_device_attr_used_xri,
&class_device_attr_npiv_info,
&class_device_attr_issue_reset,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
......@@ -1299,7 +1506,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
} else {
if (phba->sysfs_mbox.state != SMBOX_WRITING ||
phba->sysfs_mbox.offset != off ||
phba->sysfs_mbox.mbox == NULL ) {
phba->sysfs_mbox.mbox == NULL) {
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
......@@ -1406,6 +1613,8 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
return -EPERM;
}
phba->sysfs_mbox.mbox->vport = vport;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
......@@ -1480,12 +1689,12 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
int error;
error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
&sysfs_ctlreg_attr);
&sysfs_ctlreg_attr);
if (error)
goto out;
error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
&sysfs_mbox_attr);
&sysfs_mbox_attr);
if (error)
goto out_remove_ctlreg_attr;
......@@ -1527,7 +1736,9 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
spin_lock_irq(shost->host_lock);
if (lpfc_is_link_up(phba)) {
if (vport->port_type == LPFC_NPIV_PORT) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
......@@ -1563,6 +1774,7 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
/* Links up, beyond this port_type reports state */
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
......@@ -1644,13 +1856,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
unsigned long seconds;
int rc = 0;
/* prevent udev from issuing mailbox commands
* until the port is configured.
*/
/*
* prevent udev from issuing mailbox commands until the port is
* configured.
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
(phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
return NULL;
return NULL;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return NULL;
......@@ -1664,6 +1877,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
......@@ -1690,6 +1904,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
......@@ -1701,7 +1916,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
if (rc == MBX_TIMEOUT)
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
else
mempool_free( pmboxq, phba->mbox_mem_pool);
mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
......@@ -1769,6 +1984,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
......@@ -1788,6 +2004,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
......@@ -1950,6 +2167,69 @@ struct fc_function_template lpfc_transport_functions = {
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
.vport_create = lpfc_vport_create,
.vport_delete = lpfc_vport_delete,
.dd_fcvport_size = sizeof(struct lpfc_vport *),
};
struct fc_function_template lpfc_vport_transport_functions = {
/* fixed attributes the driver supports */
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = lpfc_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = lpfc_get_host_port_state,
.show_host_port_state = 1,
/* active_fc4s is shown but doesn't change (thus no get function) */
.show_host_active_fc4s = 1,
.get_host_speed = lpfc_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = lpfc_get_host_fabric_name,
.show_host_fabric_name = 1,
/*
* The LPFC driver treats linkdown handling as target loss events
* so there are no sysfs handlers for link_down_tmo.
*/
.get_fc_host_stats = lpfc_get_stats,
.reset_fc_host_stats = lpfc_reset_stats,
.dd_fcrport_size = sizeof(struct lpfc_rport_data),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_starget_port_id = lpfc_get_starget_port_id,
.show_starget_port_id = 1,
.get_starget_node_name = lpfc_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = lpfc_get_starget_port_name,
.show_starget_port_name = 1,
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
.vport_disable = lpfc_vport_disable,
};
void
......@@ -1972,6 +2252,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
......
......@@ -28,15 +28,18 @@ int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
LPFC_MBOXQ_t *, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
......@@ -51,6 +54,10 @@ void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_unreg_all_rpis(struct lpfc_vport *);
void lpfc_unreg_default_rpis(struct lpfc_vport *);
void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, struct lpfc_nodelist *);
void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
......@@ -60,25 +67,33 @@ struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_disc_flush_list(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
struct lpfc_nodelist *);
void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
int lpfc_els_abort_flogi(struct lpfc_hba *);
int lpfc_initial_flogi(struct lpfc_vport *);
int lpfc_initial_fdisc(struct lpfc_vport *);
int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
......@@ -95,7 +110,7 @@ void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_els_handle_rscn(struct lpfc_vport *);
int lpfc_els_flush_rscn(struct lpfc_vport *);
void lpfc_els_flush_rscn(struct lpfc_vport *);
int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
void lpfc_els_flush_cmd(struct lpfc_vport *);
int lpfc_els_disc_adisc(struct lpfc_vport *);
......@@ -105,7 +120,7 @@ void lpfc_els_timeout_handler(struct lpfc_vport *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_ns_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
......@@ -136,6 +151,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
......@@ -144,6 +160,7 @@ struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
void lpfc_poll_start_timer(struct lpfc_hba * phba);
......@@ -176,11 +193,10 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
int lpfc_sli_hbqbuf_fill_hbq(struct lpfc_hba *);
void lpfc_sli_hbqbuf_free(struct lpfc_hba *, void *, dma_addr_t);
int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
......@@ -192,12 +208,15 @@ int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
void *);
struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
uint32_t timeout);
uint32_t timeout);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
......@@ -210,11 +229,13 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
void lpfc_scan_start(struct Scsi_Host *);
......@@ -226,14 +247,34 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct class_device_attribute *lpfc_hba_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
extern int lpfc_npiv_enable;
void lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp);
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int);
void lpfc_post_hba_setup_vport_init(struct lpfc_vport *);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
int lpfc_get_instance(void);
void lpfc_host_attrib_init(struct Scsi_Host *);
/* Interface exported by fabric iocb scheduler */
int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_fabric_abort_vport(struct lpfc_vport *);
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
void lpfc_fabric_abort_flogi(struct lpfc_hba *);
void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
......@@ -40,6 +40,7 @@
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"
#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
* incapable of reporting */
......@@ -74,15 +75,13 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
__FUNCTION__, __LINE__,
piocbq, mp, size,
piocbq->iocb.ulpStatus);
}
static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
struct hbq_dmabuf *sp, uint32_t size)
struct lpfc_dmabuf *mp, uint32_t size)
{
struct lpfc_dmabuf *mp = NULL;
mp = sp ? &sp->dbuf : NULL;
if (!mp) {
printk(KERN_ERR "%s (%d): Unsolicited CT, no "
"HBQ buffer, piocbq = %p, status = x%x\n",
......@@ -102,21 +101,26 @@ void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
struct lpfc_dmabuf *mp = NULL;
struct hbq_dmabuf *sp = NULL;
IOCB_t *icmd = &piocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
dma_addr_t paddr;
uint32_t size;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
piocbq->context2 = NULL;
piocbq->context3 = NULL;
if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_hbqbuf_fill_hbq(phba);
else
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba, pring, 0, 1);
return;
}
......@@ -139,23 +143,14 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
size = icmd->un.cont64[0].tus.f.bdeSize;
sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[3]);
if (sp)
phba->hbq_buff_count--;
lpfc_ct_ignore_hbq_buffer(phba, iocbq, sp, size);
lpfc_sli_free_hbq(phba, sp);
lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
lpfc_in_buf_free(phba, bdeBuf1);
if (icmd->ulpBdeCount == 2) {
sp = lpfc_sli_hbqbuf_find(phba,
icmd->un.ulpWord[15]);
if (sp)
phba->hbq_buff_count--;
lpfc_ct_ignore_hbq_buffer(phba, iocbq, sp,
lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
size);
lpfc_sli_free_hbq(phba, sp);
lpfc_in_buf_free(phba, bdeBuf2);
}
}
lpfc_sli_hbqbuf_fill_hbq(phba);
} else {
struct lpfc_iocbq *next;
......@@ -176,8 +171,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
paddr);
size = icmd->un.cont64[i].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
lpfc_in_buf_free(phba, mp);
}
list_del(&iocbq->list);
lpfc_sli_release_iocbq(phba, iocbq);
......@@ -222,7 +216,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
INIT_LIST_HEAD(&mp->list);
if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
else
mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
......@@ -242,8 +237,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
/* build buffer ptr list for IOCB */
bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->tus.f.bdeSize = (uint16_t) cnt;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
......@@ -262,13 +257,14 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
uint32_t tmo)
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
/* Allocate buffer for command iocb */
geniocb = lpfc_sli_get_iocbq(phba);
......@@ -311,15 +307,25 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
icmd->ulpCt_h = 0;
icmd->ulpCt_l = 0;
}
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0119 Issue GEN REQ IOCB for NPORT x%x "
"Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
icmd->ulpIoTag, vport->port_state);
"%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n", phba->brd_no, vport->vpi,
ndlp->nlp_DID, icmd->ulpIoTag,
vport->port_state);
geniocb->iocb_cmpl = cmpl;
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
geniocb->retry = retry;
rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
if (rc == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, geniocb);
return 1;
}
......@@ -332,7 +338,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
uint32_t rsp_size)
uint32_t rsp_size, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
......@@ -349,7 +355,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
return -ENOMEM;
status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
cnt+1, 0);
cnt+1, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
......@@ -357,10 +363,23 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
return 0;
}
static struct lpfc_vport *
lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did)
{
struct lpfc_vport *vport_curr;
list_for_each_entry(vport_curr, &phba->port_list, listentry) {
if ((vport_curr->fc_myDID) &&
(vport_curr->fc_myDID == did))
return vport_curr;
}
return NULL;
}
static int
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
(struct lpfc_sli_ct_request *) mp->virt;
......@@ -372,6 +391,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
struct list_head head;
lpfc_set_disctmo(vport);
vport->num_disc_nodes = 0;
list_add_tail(&head, &mp->list);
......@@ -392,25 +412,64 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
/* Get next DID from NameServer List */
CTentry = *ctptr++;
Did = ((be32_to_cpu(CTentry)) & Mask_DID);
ndlp = NULL;
/* Check for rscn processing or not */
if (Did != vport->fc_myDID)
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0238 Process x%x NameServer"
" Rsp Data: x%x x%x x%x\n",
phba->brd_no,
Did, ndlp->nlp_flag,
vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0239 Skip x%x NameServer "
"Rsp Data: x%x x%x x%x\n",
phba->brd_no,
Did, Size, vport->fc_flag,
vport->fc_rscn_id_cnt);
/*
* Check for RSCN processing or not.
* To conserve RPIs, filter out addresses for other
* vports on the same physical HBA.
*/
if ((Did != vport->fc_myDID) &&
((lpfc_find_vport_by_did(phba, Did) == NULL) ||
phba->cfg_peer_port_login)) {
if ((vport->port_type != LPFC_NPIV_PORT) ||
(vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
(!phba->cfg_vport_restrict_login)) {
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp) {
lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0238 Process "
"x%x NameServer Rsp"
"Data: x%x x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
ndlp->nlp_flag,
vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0239 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
(lpfc_rscn_payload_check(vport, Did))) {
if (lpfc_ns_cmd(vport,
SLI_CTNS_GFF_ID,
0, Did) == 0)
vport->num_disc_nodes++;
}
else {
lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0245 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
}
}
if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
goto nsout1;
......@@ -422,34 +481,19 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
nsout1:
list_del(&head);
/*
* The driver has cycled through all Nports in the RSCN payload.
* Complete the handling by cleaning up and marking the
* current driver state.
*/
if (vport->port_state == LPFC_VPORT_READY) {
lpfc_els_flush_rscn(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
spin_unlock_irq(shost->host_lock);
}
return 0;
}
static void
lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_dmabuf *bmp;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
struct lpfc_nodelist *ndlp;
struct lpfc_sli_ct_request *CTrsp;
int rc;
......@@ -460,33 +504,41 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING)
goto out;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
goto out;
goto err1;
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
vport->fc_ns_retry++;
/* CT command is being retried */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT);
if (rc == 0)
goto out;
}
}
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0);
if (rc == 0)
goto out;
}
err1:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
phba->brd_no, vport->vpi, irsp->ulpStatus,
vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0208 NameServer Rsp "
"%d (%d):0208 NameServer Rsp "
"Data: x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
vport->fc_flag);
lpfc_ns_rsp(vport, outp,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
......@@ -494,21 +546,19 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0240 NameServer Rsp Error "
"%d (%d):0240 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
vport->fc_flag);
} else {
/* NameServer Rsp Error */
lpfc_printf_log(phba,
KERN_INFO,
LOG_DISCOVERY,
"%d:0241 NameServer Rsp Error "
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
......@@ -516,8 +566,111 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
/* Link up / RSCN discovery */
lpfc_disc_start(vport);
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
* Complete the handling by cleaning up and marking the
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
if (vport->fc_flag & FC_RSCN_MODE) {
lpfc_els_flush_rscn(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
spin_unlock_irq(shost->host_lock);
}
else
lpfc_els_flush_rscn(vport);
}
lpfc_disc_start(vport);
}
out:
lpfc_free_ct_rsp(phba, outp);
lpfc_mbuf_free(phba, inp->virt, inp->phys);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(inp);
kfree(bmp);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
void
lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_dmabuf *bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
int did;
uint8_t fbits;
struct lpfc_nodelist *ndlp;
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
if ((fbits & FC4_FEATURE_INIT) &&
!(fbits & FC4_FEATURE_TARGET)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0245 Skip x%x GFF "
"NameServer Rsp Data: (init) "
"x%x x%x\n", phba->brd_no,
vport->vpi, did, fbits,
vport->fc_rscn_id_cnt);
goto out;
}
}
}
/* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did);
if (ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi,
did, ndlp->nlp_flag, vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0243 Skip x%x GFF "
"NameServer Rsp Data: x%x x%x\n",
phba->brd_no, vport->vpi, did,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
* Complete the handling by cleaning up and marking the
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
if (vport->fc_flag & FC_RSCN_MODE) {
lpfc_els_flush_rscn(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
spin_unlock_irq(shost->host_lock);
}
else
lpfc_els_flush_rscn(vport);
}
lpfc_disc_start(vport);
}
lpfc_free_ct_rsp(phba, outp);
lpfc_mbuf_free(phba, inp->virt, inp->phys);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
......@@ -527,15 +680,19 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *bmp;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
int cmdcode, rc;
uint8_t retry;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
......@@ -545,16 +702,40 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
irsp = &rspiocb->iocb;
cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
/* NS request completes status <ulpStatus> CmdRsp <CmdRsp> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0209 RFT request completes ulpStatus x%x "
"%d (%d):0209 NS request %x completes "
"ulpStatus x%x / x%x "
"CmdRsp x%x, Context x%x, Tag x%x\n",
phba->brd_no, irsp->ulpStatus,
phba->brd_no, vport->vpi,
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4],
CTrsp->CommandResponse.bits.CmdRsp,
cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
if (irsp->ulpStatus) {
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
if (retry >= LPFC_MAX_NS_RETRY)
goto out;
retry++;
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0216 Retrying NS cmd %x\n",
phba->brd_no, vport->vpi, cmdcode);
rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
if (rc == 0)
goto out;
}
out:
lpfc_free_ct_rsp(phba, outp);
lpfc_mbuf_free(phba, inp->virt, inp->phys);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
......@@ -572,6 +753,14 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
......@@ -581,23 +770,54 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
if (irsp->ulpStatus != IOSTAT_SUCCESS)
vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
void
lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp)
int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
int n;
uint8_t *wwn = vport->phba->wwpn;
n = snprintf(symbol, size,
"Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
wwn[0], wwn[1], wwn[2], wwn[3],
wwn[4], wwn[5], wwn[6], wwn[7]);
if (vport->port_type == LPFC_PHYSICAL_PORT)
return n;
if (n < size)
n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
if (n < size && vport->vname)
n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
return n;
}
int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
char fwrev[16];
int n;
lpfc_decode_firmware_rev(phba, fwrev, 0);
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
fwrev, lpfc_release_version);
return;
n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
vport->phba->ModelName, fwrev, lpfc_release_version);
return n;
}
/*
......@@ -608,8 +828,10 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp)
* LI_CTNS_RFT_ID
*/
int
lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
uint8_t retry, uint32_t context)
{
struct lpfc_nodelist * ndlp;
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp, *bmp;
struct lpfc_sli_ct_request *CtReq;
......@@ -617,6 +839,11 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *) = NULL;
uint32_t rsp_size = 1024;
size_t size;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
return 1;
/* fill in BDEs for command */
/* Allocate buffer for command payload */
......@@ -640,24 +867,26 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
goto ns_cmd_free_bmp;
/* NameServer Req */
lpfc_printf_log(phba,
KERN_INFO,
LOG_DISCOVERY,
"%d:0236 NameServer Req Data: x%x x%x x%x\n",
phba->brd_no, cmdcode, vport->fc_flag,
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
vport->fc_rscn_id_cnt);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = 0;
if (cmdcode == SLI_CTNS_GID_FT)
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFF_ID)
bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFT_ID)
bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RNN_ID)
bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSPN_ID)
bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
......@@ -678,13 +907,20 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
if (vport->port_state < LPFC_VPORT_READY)
if (vport->port_state < LPFC_NS_QRY)
vport->port_state = LPFC_NS_QRY;
lpfc_set_disctmo(vport);
cmpl = lpfc_cmpl_ct_cmd_gid_ft;
rsp_size = FC_MAX_NS_RSP;
break;
case SLI_CTNS_GFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_GFF_ID);
CtReq->un.gff.PortId = be32_to_cpu(context);
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
case SLI_CTNS_RFT_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFT_ID);
......@@ -693,17 +929,6 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
case SLI_CTNS_RFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
CtReq->un.rff.feature_res = 0;
CtReq->un.rff.feature_tgt = 0;
CtReq->un.rff.type_code = FC_FCP_DATA;
CtReq->un.rff.feature_init = 1;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
case SLI_CTNS_RNN_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID);
......@@ -713,18 +938,39 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
cmpl = lpfc_cmpl_ct_cmd_rnn_id;
break;
case SLI_CTNS_RSPN_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSPN_ID);
CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
size = sizeof(CtReq->un.rspn.symbname);
CtReq->un.rspn.len =
lpfc_vport_symbolic_port_name(vport,
CtReq->un.rspn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rspn_id;
break;
case SLI_CTNS_RSNN_NN:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSNN_NN);
memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
size = sizeof(CtReq->un.rsnn.symbname);
CtReq->un.rsnn.len =
lpfc_vport_symbolic_node_name(vport,
CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
case SLI_CTNS_RFF_ID:
vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_FCP_DATA;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size))
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
/* On success, the cmpl function will free the buffers */
return 0;
......@@ -757,8 +1003,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0220 FDMI rsp failed Data: x%x\n",
phba->brd_no, be16_to_cpu(fdmi_cmd));
"%d (%d):0220 FDMI rsp failed Data: x%x\n",
phba->brd_no, vport->vpi,
be16_to_cpu(fdmi_cmd));
}
switch (be16_to_cpu(fdmi_cmd)) {
......@@ -828,9 +1075,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
/* FDMI request */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0218 FDMI Request Data: x%x x%x x%x\n",
phba->brd_no,
vport->fc_flag, vport->port_state, cmdcode);
"%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->fc_flag,
vport->port_state, cmdcode);
CtReq = (struct lpfc_sli_ct_request *) mp->virt;
......@@ -1134,15 +1381,15 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
}
bpl = (struct ulp_bde64 *) bmp->virt;
bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = size;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
cmpl = lpfc_cmpl_ct_cmd_fdmi;
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
return 0;
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
......@@ -1155,8 +1402,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0244 Issue FDMI request failed Data: x%x\n",
phba->brd_no, cmdcode);
"%d (%d):0244 Issue FDMI request failed Data: x%x\n",
phba->brd_no, vport->vpi, cmdcode);
return 1;
}
......@@ -1170,10 +1417,15 @@ lpfc_fdmi_tmo(unsigned long ptr)
spin_lock_irqsave(&vport->work_port_lock, iflag);
if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
vport->work_port_events |= WORKER_FDMI_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
else
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
}
void
......
......@@ -36,13 +36,14 @@ enum lpfc_work_type {
LPFC_EVT_WARM_START,
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
};
/* structure used to queue event to the discovery tasklet */
struct lpfc_work_evt {
struct list_head evt_listp;
void * evt_arg1;
void * evt_arg2;
void *evt_arg1;
void *evt_arg2;
enum lpfc_work_type evt;
};
......@@ -73,10 +74,12 @@ struct lpfc_nodelist {
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct timer_list nlp_initiator_tmr; /* Used with dev_loss */
struct fc_rport *rport; /* Corresponding FC transport
port structure */
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
......@@ -99,6 +102,7 @@ struct lpfc_nodelist {
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
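/* NLP_TARGET_REMOVE is acted on in the LOGO completion path
 * (lpfc_cmpl_els_logo): when set, the node is pushed through
 * NLP_EVT_DEVICE_RM so its RPI is unregistered and any outstanding
 * I/O is aborted.
 */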
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
......
......@@ -35,9 +35,13 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
static int lpfc_max_els_tries = 3;
static int
......@@ -58,10 +62,10 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 0;
/* Pending Link Event during Discovery */
lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
"%d:0237 Pending Link Event during "
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0237 Pending Link Event during "
"Discovery: State x%x\n",
phba->brd_no, phba->pport->port_state);
phba->brd_no, vport->vpi, phba->pport->port_state);
/* CLEAR_LA should re-enable link attention events and
* we should then immediately take a LATT event. The
......@@ -73,12 +77,10 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
vport->fc_flag |= FC_ABORT_DISCOVERY;
spin_unlock_irq(shost->host_lock);
if (phba->link_state != LPFC_CLEAR_LA) {
if (phba->link_state != LPFC_CLEAR_LA)
lpfc_issue_clear_la(phba, vport);
}
return 1;
}
static struct lpfc_iocbq *
......@@ -106,7 +108,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
((pcmd->virt = lpfc_mbuf_alloc(phba,
MEM_PRI, &(pcmd->phys))) == 0)) {
kfree(pcmd);
......@@ -119,7 +121,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* Allocate buffer for response payload */
if (expectRsp) {
prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
......@@ -136,7 +138,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
}
/* Allocate buffer for Buffer ptr list */
pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
......@@ -157,18 +159,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
}
icmd->ulpBdeCount = 1;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
icmd->ulpContext = vport->vpi;
icmd->ulpCt_h = 0;
icmd->ulpCt_l = 1;
}
bpl = (struct ulp_bde64 *) pbuflist->virt;
bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
......@@ -186,7 +196,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
}
/* Save for completion so we can release these resources */
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (elscmd != ELS_CMD_LS_RJT)
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
......@@ -200,16 +211,16 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (expectRsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0116 Xmit ELS command x%x to remote "
"%d (%d):0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state: x%x\n",
phba->brd_no, elscmd, did,
phba->brd_no, vport->vpi, elscmd, did,
elsiocb->iotag, vport->port_state);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0117 Xmit ELS response x%x to remote "
"%d (%d):0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x\n",
phba->brd_no, elscmd,
phba->brd_no, vport->vpi, elscmd,
ndlp->nlp_DID, elsiocb->iotag, cmdSize);
}
......@@ -218,15 +229,76 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp)
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
int rc;
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto fail;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;
vport->port_state = LPFC_FABRIC_CFG_LINK;
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED)
goto fail_free_mbox;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;
rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
0);
if (rc)
goto fail_free_mbox;
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED)
goto fail_issue_reg_login;
return 0;
fail_issue_reg_login:
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
fail_free_mbox:
mempool_free(mbox, phba->mbox_mem_pool);
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0249 Cannot issue Register Fabric login\n",
phba->brd_no, vport->vpi);
return -ENXIO;
}
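/*
 * Sketch of the sequence above: fabric registration is two mailbox
 * commands back to back.  CONFIG_LINK goes first with the default
 * completion handler, then REG_LOGIN for Fabric_DID with
 * lpfc_mbx_cmpl_fabric_reg_login as its completion and a node
 * reference held in context2.  The goto labels unwind in reverse:
 * drop the node reference, free the service-parameter buffer attached
 * by lpfc_reg_login(), free the mailbox, and finally mark the vport
 * FC_VPORT_FAILED and log message 0249.
 */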
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
spin_unlock_irq(shost->host_lock);
......@@ -251,7 +323,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
......@@ -265,47 +337,59 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cmn.bbRcvSizeLsb;
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;
vport->port_state = LPFC_FABRIC_CFG_LINK;
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
if (sp->cmn.response_multiple_NPort) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
"%d:1816 FLOGI NPIV supported, "
"response data 0x%x\n",
phba->brd_no,
sp->cmn.response_multiple_NPort);
phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED)
goto fail_free_mbox;
} else {
/* Because we asked f/w for NPIV, it still expects us
to call reg_vnpid at least for the physical host */
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
"%d:1817 Fabric does not support NPIV "
"- configuring single port mode.\n",
phba->brd_no);
phba->vpi_cnt = 1;
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
}
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;
rc = lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0);
if (rc)
goto fail_free_mbox;
if ((vport->fc_prevDID != vport->fc_myDID) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed.
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
spin_lock_irq(shost->host_lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
}
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED)
goto fail_issue_reg_login;
ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
lpfc_register_new_vport(phba, vport, ndlp);
return 0;
}
lpfc_issue_fabric_reglogin(vport);
return 0;
fail_issue_reg_login:
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
fail_free_mbox:
mempool_free(mbox, phba->mbox_mem_pool);
fail:
return -ENXIO;
}
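/*
 * Flow of the routine above: after a fabric FLOGI completes, the
 * fabric service parameters are recorded, the switch's answer to the
 * multiple-N_Port-ID request is checked (NPIV supported, or fall back
 * to single port mode), stale RPIs are unregistered if the fabric
 * assigned a different NportID than before (plus the VPI under NPIV),
 * and then either lpfc_register_new_vport() or
 * lpfc_issue_fabric_reglogin() carries on with registration.
 */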
/*
......@@ -322,12 +406,13 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
phba->vpi_cnt = 1;
spin_unlock_irq(shost->host_lock);
phba->fc_edtov = FF_DEF_EDTOV;
phba->fc_ratov = FF_DEF_RATOV;
rc = memcmp(&vport->fc_portname, &sp->portName,
sizeof(struct lpfc_name));
sizeof(vport->fc_portname));
if (rc >= 0) {
/* This side will initiate the PLOGI */
spin_lock_irq(shost->host_lock);
......@@ -352,7 +437,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox,
MBX_NOWAIT | MBX_STOP_IOCB);
MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
......@@ -392,7 +477,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
return 0;
fail:
fail:
return -ENXIO;
}
......@@ -422,6 +507,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
phba->vpi_cnt = 1;
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
......@@ -433,11 +519,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* FLOGI failure */
lpfc_printf_log(phba,
KERN_INFO,
LOG_ELS,
"%d:0100 FLOGI failure Data: x%x x%x x%x\n",
phba->brd_no,
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0100 FLOGI failure Data: x%x x%x "
"x%x\n",
phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
goto flogifail;
......@@ -453,9 +538,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI completes successfully */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0101 FLOGI completes sucessfully "
"%d (%d):0101 FLOGI completes sucessfully "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
......@@ -475,6 +560,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
flogifail:
lpfc_nlp_put(ndlp);
phba->vpi_cnt = 1;
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
......@@ -506,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pring = &phba->sli.ring[LPFC_ELS_RING];
cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
if (!elsiocb)
return 1;
......@@ -517,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
pcmd += sizeof (uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm));
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
/* Setup CSPs accordingly for Fabric */
......@@ -532,6 +619,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
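/*
 * The fabric's answer to this request is examined in
 * lpfc_cmpl_els_flogi_fabric(): if response_multiple_NPort is set the
 * link is flagged LS_NPIV_FAB_SUPPORTED, otherwise message 1817 is
 * logged and the driver drops back to single port mode (vpi_cnt = 1).
 */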
tmo = phba->fc_ratov;
phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
lpfc_set_disctmo(vport);
......@@ -539,7 +634,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_stat.elsXmitFLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
......@@ -572,8 +667,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
icmd->un.elsreq64.bdl.ulpIoTag32) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && (ndlp->nlp_DID == Fabric_DID))
if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
}
}
spin_unlock_irq(&phba->hbalock);
......@@ -604,6 +700,28 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 1;
}
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 0;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
} else {
lpfc_dequeue_node(vport, ndlp);
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
lpfc_nlp_put(ndlp);
}
return 1;
}
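/*
 * lpfc_initial_fdisc() mirrors lpfc_initial_flogi(): it reuses an
 * existing Fabric_DID node (dequeuing it) or allocates a fresh one,
 * then starts vport discovery with an FDISC instead of a FLOGI.
 */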
static void
lpfc_more_plogi(struct lpfc_vport *vport)
{
......@@ -615,9 +733,9 @@ lpfc_more_plogi(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> PLOGIs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0232 Continue discovery with %d PLOGIs to go "
"%d (%d):0232 Continue discovery with %d PLOGIs to go "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->num_disc_nodes,
phba->brd_no, vport->vpi, vport->num_disc_nodes,
vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more PLOGIs to be sent */
......@@ -629,14 +747,13 @@ lpfc_more_plogi(struct lpfc_vport *vport)
}
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_nodelist *new_ndlp;
uint32_t *lp;
struct serv_parm *sp;
uint8_t name[sizeof (struct lpfc_name)];
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc;
/* Fabric nodes can have the same WWPN so we don't bother searching
......@@ -645,8 +762,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
/* Now we find out if the NPort we are logging into, matches the WWPN
......@@ -701,8 +817,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
if (!ndlp) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
goto out;
}
......@@ -717,11 +837,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0102 PLOGI completes to NPort x%x "
"%d (%d):0102 PLOGI completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
vport->num_disc_nodes);
phba->brd_no, vport->vpi, ndlp->nlp_DID,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
......@@ -748,24 +868,33 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* PLOGI failed */
if (ndlp->nlp_DID == NameServer_DID) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0250 Nameserver login error: "
"0x%x / 0x%x\n",
phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4]);
}
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
rc = NLP_STE_FREED_NODE;
} else {
rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
NLP_EVT_CMPL_PLOGI);
}
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
NLP_EVT_CMPL_PLOGI);
}
if (disc && vport->num_disc_nodes) {
......@@ -811,11 +940,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int ret;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
ELS_CMD_PLOGI);
if (!elsiocb)
......@@ -826,8 +956,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
pcmd += sizeof (uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm));
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
if (sp->cmn.fcphLow < FC_PH_4_3)
......@@ -838,7 +968,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
if (ret == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
......@@ -867,10 +999,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0103 PRLI completes to NPort x%x "
"%d (%d):0103 PRLI completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout,
phba->brd_no, vport->vpi, ndlp->nlp_DID,
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
vport->fc_prli_sent--;
......@@ -887,18 +1019,18 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
goto out;
} else {
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
NLP_EVT_CMPL_PRLI);
}
} else {
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
NLP_EVT_CMPL_PRLI);
}
out:
......@@ -923,7 +1055,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_PRLI);
if (!elsiocb)
......@@ -933,9 +1065,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
npr = (PRLI *) pcmd;
......@@ -982,9 +1114,9 @@ lpfc_more_adisc(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> ADISCs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0210 Continue discovery with %d ADISCs to go "
"%d (%d):0210 Continue discovery with %d ADISCs to go "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->num_disc_nodes,
phba->brd_no, vport->vpi, vport->num_disc_nodes,
vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more ADISCs to be sent */
......@@ -1048,11 +1180,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0104 ADISC completes to NPort x%x "
"%d (%d):0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
vport->num_disc_nodes);
phba->brd_no, vport->vpi, ndlp->nlp_DID,
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
......@@ -1095,12 +1227,41 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if we are done with ADISC authentication */
if (vport->num_disc_nodes == 0) {
lpfc_can_disctmo(vport);
/* If we get here, there is nothing left to wait for */
if (vport->port_state < LPFC_VPORT_READY &&
phba->link_state != LPFC_CLEAR_LA) {
/* If we get here, there is nothing left to ADISC */
/*
* For NPIV, cmpl_reg_vpi will set port_state to READY,
* and continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE)) {
lpfc_issue_reg_vpi(phba, vport);
goto out;
}
/*
* For SLI2, we need to set port_state to READY
* and continue discovery.
*/
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
if (vport->fc_npr_cnt)
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &=
~FC_NDISC_ACTIVE;
spin_unlock_irq(
shost->host_lock);
lpfc_can_disctmo(vport);
}
}
vport->port_state = LPFC_VPORT_READY;
} else {
lpfc_rscn_disc(vport);
}
......@@ -1125,7 +1286,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t *pcmd;
uint16_t cmdsize;
cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ADISC);
if (!elsiocb)
......@@ -1136,13 +1297,13 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* Fill in ADISC payload */
ap = (ADISC *) pcmd;
ap->hardAL_PA = phba->fc_pref_ALPA;
memcpy(&ap->portName, &vport->fc_portname, sizeof (struct lpfc_name));
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name));
memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ap->DID = be32_to_cpu(vport->fc_myDID);
phba->fc_stat.elsXmitADISC++;
......@@ -1181,16 +1342,25 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0105 LOGO completes to NPort x%x "
"%d (%d):0105 LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout,
phba->brd_no, vport->vpi, ndlp->nlp_DID,
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
/* NLP_EVT_DEVICE_RM should unregister the RPI
* which should abort all outstanding IOs.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
goto out;
}
if (irsp->ulpStatus) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
......@@ -1199,20 +1369,20 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* LOGO failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
(irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
goto out;
} else {
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
NLP_EVT_CMPL_LOGO);
}
} else {
/* Good status, call state machine.
* This will unregister the rpi if needed.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
NLP_EVT_CMPL_LOGO);
}
out:
......@@ -1232,11 +1402,12 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LOGO);
if (!elsiocb)
......@@ -1245,19 +1416,21 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* Fill in LOGO payload */
*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
pcmd += sizeof (uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof (struct lpfc_name));
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
if (rc == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
......@@ -1277,11 +1450,10 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba,
KERN_INFO,
LOG_ELS,
"%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
phba->brd_no,
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
"x%x\n",
phba->brd_no, vport->vpi,
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
......@@ -1305,7 +1477,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof (uint32_t) + sizeof (SCR));
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
......@@ -1324,10 +1496,10 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* For SCR, remainder of payload is SCR parameter page */
memset(pcmd, 0, sizeof (SCR));
memset(pcmd, 0, sizeof(SCR));
((SCR *) pcmd)->Function = SCR_FUNC_FULL;
phba->fc_stat.elsXmitSCR++;
......@@ -1358,7 +1530,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof (uint32_t) + sizeof (FARP));
cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
......@@ -1376,25 +1548,25 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* Fill in FARPR payload */
fp = (FARP *) (pcmd);
memset(fp, 0, sizeof (FARP));
memset(fp, 0, sizeof(FARP));
lp = (uint32_t *) pcmd;
*lp++ = be32_to_cpu(nportid);
*lp++ = be32_to_cpu(vport->fc_myDID);
fp->Rflags = 0;
fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
memcpy(&fp->RportName, &vport->fc_portname, sizeof (struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof (struct lpfc_name));
memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ondlp = lpfc_findnode_did(vport, nportid);
if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
sizeof (struct lpfc_name));
sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
sizeof (struct lpfc_name));
sizeof(struct lpfc_name));
}
phba->fc_stat.elsXmitFARPR++;
......@@ -1470,18 +1642,17 @@ lpfc_els_retry_delay(unsigned long ptr)
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
struct lpfc_vport *vport = ndlp->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
unsigned long iflag;
unsigned long flags;
struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
ndlp = (struct lpfc_nodelist *) ptr;
phba = ndlp->vport->phba;
evtp = &ndlp->els_retry_evt;
spin_lock_irqsave(shost->host_lock, iflag);
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(shost->host_lock, iflag);
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
......@@ -1489,9 +1660,9 @@ lpfc_els_retry_delay(unsigned long ptr)
evtp->evt = LPFC_EVT_ELS_RETRY;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(shost->host_lock, iflag);
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
......@@ -1550,6 +1721,9 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
break;
case ELS_CMD_FDISC:
lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
return;
}
......@@ -1598,7 +1772,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1;
delay = 1000;
retry = 1;
break;
......@@ -1606,9 +1780,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
break;
case IOERR_ILLEGAL_COMMAND:
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
(cmd == ELS_CMD_FDISC)) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0124 FDISC failed (3/6) retrying...\n",
phba->brd_no, vport->vpi);
lpfc_mbx_unreg_vpi(vport);
retry = 1;
/* Always retry for this case */
cmdiocb->retry = 0;
}
break;
case IOERR_NO_RESOURCES:
if (cmd == ELS_CMD_PLOGI)
delay = 1;
delay = 100;
retry = 1;
break;
......@@ -1641,27 +1827,56 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_CMD_IN_PROGRESS) {
if (cmd == ELS_CMD_PLOGI) {
delay = 1;
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
}
if (cmd == ELS_CMD_PLOGI) {
delay = 1;
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
break;
}
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0125 FDISC Failed (x%x)."
" Fabric out of resources\n",
phba->brd_no, vport->vpi, stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_NO_FABRIC_RSCS);
}
break;
case LSRJT_LOGICAL_BSY:
if (cmd == ELS_CMD_PLOGI) {
delay = 1;
delay = 1000;
maxretry = 48;
} else if (cmd == ELS_CMD_FDISC) {
/* Always retry for this case */
cmdiocb->retry = 0;
}
retry = 1;
break;
case LSRJT_LOGICAL_ERR:
case LSRJT_PROTOCOL_ERR:
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0123 FDISC Failed (x%x)."
" Fabric Detected Bad WWN\n",
phba->brd_no, vport->vpi, stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_FABRIC_REJ_WWN);
}
break;
}
break;
......@@ -1688,15 +1903,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0107 Retry ELS command x%x to remote "
"%d (%d):0107 Retry ELS command x%x to remote "
"NPORT x%x Data: x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry, delay);
if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
/* If discovery / RSCN timer is running, reset it */
if (timer_pending(&vport->fc_disctmo) ||
(vport->fc_flag & FC_RSCN_MODE))
(vport->fc_flag & FC_RSCN_MODE))
lpfc_set_disctmo(vport);
}
......@@ -1705,7 +1920,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
/* delay is specified in milliseconds */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(delay));
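/* For example, the PLOGI retry paths above now pass delay = 1000
 * (one second, equivalent to the old "jiffies + HZ"), while the
 * IOERR_NO_RESOURCES path backs off for only 100 ms.
 */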
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
......@@ -1720,6 +1937,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case ELS_CMD_FLOGI:
lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_FDISC:
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
......@@ -1748,9 +1968,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* No retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0108 No retry ELS command x%x to remote NPORT x%x "
"Data: x%x\n",
phba->brd_no,
"%d (%d):0108 No retry ELS command x%x to remote "
"NPORT x%x Data: x%x\n",
phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry);
return 0;
......@@ -1798,10 +2018,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0109 ACC to LOGO completes to NPort x%x "
"%d (%d):0109 ACC to LOGO completes to NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
phba->brd_no, vport->vpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
switch (ndlp->nlp_state) {
case NLP_STE_UNUSED_NODE: /* node is just allocated */
......@@ -1848,9 +2068,9 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0110 ELS response tag x%x completes "
"%d (%d):0110 ELS response tag x%x completes "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
......@@ -1926,7 +2146,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
switch (flag) {
case ELS_CMD_ACC:
cmdsize = sizeof (uint32_t);
cmdsize = sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
......@@ -1940,10 +2160,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
break;
case ELS_CMD_PLOGI:
cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
......@@ -1957,11 +2177,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
elsiocb->context_un.mbox = mbox;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm));
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
break;
case ELS_CMD_PRLO:
cmdsize = sizeof (uint32_t) + sizeof (PRLO);
cmdsize = sizeof(uint32_t) + sizeof(PRLO);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
if (!elsiocb)
......@@ -1972,7 +2192,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
sizeof (uint32_t) + sizeof (PRLO));
sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
......@@ -1988,9 +2208,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
"%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
"DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
phba->brd_no, elsiocb->iotag,
phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
......@@ -2029,7 +2249,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = 2 * sizeof (uint32_t);
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LS_RJT);
if (!elsiocb)
......@@ -2041,14 +2261,15 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
*((uint32_t *) (pcmd)) = rejectError;
/* Xmit ELS RJT <err> response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
phba->brd_no, rejectError, elsiocb->iotag,
"%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
......@@ -2076,7 +2297,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
uint16_t cmdsize;
int rc;
cmdsize = sizeof (uint32_t) + sizeof (ADISC);
cmdsize = sizeof(uint32_t) + sizeof(ADISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
......@@ -2088,21 +2309,21 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0130 Xmit ADISC ACC response iotag x%x xri: "
"%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
phba->brd_no, elsiocb->iotag,
phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
ap = (ADISC *) (pcmd);
ap->hardAL_PA = phba->fc_pref_ALPA;
memcpy(&ap->portName, &vport->fc_portname, sizeof (struct lpfc_name));
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name));
memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ap->DID = be32_to_cpu(vport->fc_myDID);
phba->fc_stat.elsXmitACC++;
......@@ -2134,9 +2355,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = sizeof (uint32_t) + sizeof (PRLI);
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
if (!elsiocb)
return 1;
......@@ -2146,19 +2367,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0131 Xmit PRLI ACC response tag x%x xri x%x, "
"%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
phba->brd_no, elsiocb->iotag,
phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
memset(pcmd, 0, sizeof (PRLI));
memset(pcmd, 0, sizeof(PRLI));
npr = (PRLI *) pcmd;
vpd = &phba->vpd;
......@@ -2208,10 +2429,10 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
+ (2 * sizeof (struct lpfc_name));
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
if (format)
cmdsize += sizeof (RNID_TOP_DISC);
cmdsize += sizeof(RNID_TOP_DISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
......@@ -2224,30 +2445,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0132 Xmit RNID ACC response tag x%x "
"%d (%d):0132 Xmit RNID ACC response tag x%x "
"xri x%x\n",
phba->brd_no, elsiocb->iotag,
phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint32_t);
pcmd += sizeof(uint32_t);
memset(pcmd, 0, sizeof (RNID));
memset(pcmd, 0, sizeof(RNID));
rn = (RNID *) (pcmd);
rn->Format = format;
rn->CommonLen = (2 * sizeof (struct lpfc_name));
memcpy(&rn->portName, &vport->fc_portname, sizeof (struct lpfc_name));
memcpy(&rn->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name));
rn->CommonLen = (2 * sizeof(struct lpfc_name));
memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
switch (format) {
case 0:
rn->SpecificLen = 0;
break;
case RNID_TOPOLOGY_DISC:
rn->SpecificLen = sizeof (RNID_TOP_DISC);
rn->SpecificLen = sizeof(RNID_TOP_DISC);
memcpy(&rn->un.topologyDisc.portName,
&vport->fc_portname, sizeof (struct lpfc_name));
&vport->fc_portname, sizeof(struct lpfc_name));
rn->un.topologyDisc.unitType = RNID_HBA;
rn->un.topologyDisc.physPort = 0;
rn->un.topologyDisc.attachedNodes = 0;
......@@ -2344,22 +2565,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
int
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp;
int i;
for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
mp = vport->fc_rscn_id_list[i];
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_hbqbuf_free(phba, mp->virt, mp->phys);
else {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
vport->fc_rscn_id_list[i] = NULL;
}
spin_lock_irq(shost->host_lock);
......@@ -2367,7 +2581,6 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
return 0;
}
int
......@@ -2375,13 +2588,11 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
D_ID ns_did;
D_ID rscn_did;
struct lpfc_dmabuf *mp;
uint32_t *lp;
uint32_t payload_len, cmd, i, match;
uint32_t payload_len, i;
struct lpfc_hba *phba = vport->phba;
ns_did.un.word = did;
match = 0;
/* Never match fabric nodes for RSCNs */
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
......@@ -2392,45 +2603,40 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
return did;
for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
mp = vport->fc_rscn_id_list[i];
lp = (uint32_t *) mp->virt;
cmd = *lp++;
payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
payload_len -= sizeof (uint32_t); /* take off word 0 */
lp = vport->fc_rscn_id_list[i]->virt;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
payload_len -= sizeof(uint32_t); /* take off word 0 */
while (payload_len) {
rscn_did.un.word = *lp++;
rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
payload_len -= sizeof (uint32_t);
rscn_did.un.word = be32_to_cpu(*lp++);
payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv) {
case 0: /* Single N_Port ID affected */
if (ns_did.un.word == rscn_did.un.word)
match = did;
return did;
break;
case 1: /* Whole N_Port Area affected */
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
match = did;
return did;
break;
case 2: /* Whole N_Port Domain affected */
if (ns_did.un.b.domain == rscn_did.un.b.domain)
match = did;
break;
case 3: /* Whole Fabric affected */
match = did;
return did;
break;
default:
/* Unknown Identifier in RSCN node */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0217 Unknown Identifier in "
"RSCN payload Data: x%x\n",
phba->brd_no, rscn_did.un.word);
break;
}
if (match)
break;
"%d (%d):0217 Unknown "
"Identifier in RSCN payload "
"Data: x%x\n",
phba->brd_no, vport->vpi,
rscn_did.un.word);
case 3: /* Whole Fabric affected */
return did;
}
}
return match;
}
return 0;
}
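/*
 * lpfc_rscn_payload_check() now returns the DID as soon as any RSCN
 * entry matches it (single N_Port, area, domain or whole-fabric
 * address format) and 0 if nothing matches; an unrecognised address
 * format is logged and then falls through to the whole-fabric case,
 * i.e. it is treated as a match.
 */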
static int
......@@ -2448,7 +2654,7 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
NLP_EVT_DEVICE_RECOVERY);
/*
* Make sure NLP_DELAY_TMO is NOT running after a device
......@@ -2468,25 +2674,26 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
struct lpfc_vport *next_vport;
uint32_t *lp, *datap;
IOCB_t *icmd;
uint32_t payload_len, cmd;
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt = vport->fc_rscn_id_cnt;
int rscn_id = 0, hba_id = 0;
int i;
icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
cmd = *lp++;
payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
payload_len -= sizeof (uint32_t); /* take off word 0 */
cmd &= ELS_CMD_MASK;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
payload_len -= sizeof(uint32_t); /* take off word 0 */
/* RSCN received */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->fc_flag, payload_len, *lp,
vport->fc_rscn_id_cnt);
"%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
*lp, rscn_cnt);
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
fc_host_post_event(shost, fc_get_event_number(),
......@@ -2497,32 +2704,77 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*/
if (vport->port_state <= LPFC_NS_QRY) {
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
newnode);
newnode);
return 0;
}
/* If this RSCN just contains NPortIDs for other vports on this HBA,
* just ACC and ignore it.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(phba->cfg_peer_port_login)) {
i = payload_len;
datap = lp;
while (i > 0) {
nportid = *datap++;
nportid = ((be32_to_cpu(nportid)) & Mask_DID);
i -= sizeof(uint32_t);
rscn_id++;
list_for_each_entry(next_vport, &phba->port_list,
listentry) {
if (nportid == next_vport->fc_myDID) {
hba_id++;
break;
}
}
}
if (rscn_id == hba_id) {
/* ALL NPortIDs in RSCN are on HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
*lp, rscn_cnt);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL, newnode);
return 0;
}
}
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->context2 to process later.
*/
if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
if ((vport->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
vport->fc_flag |= FC_RSCN_DEFERRED;
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE;
spin_unlock_irq(shost->host_lock);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* If we zero cmdiocb->context2, the calling
* routine will not try to free it.
*/
cmdiocb->context2 = NULL;
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
}
if ((rscn_cnt) &&
(payload_len + length <= LPFC_BPL_SIZE)) {
*cmd &= ELS_CMD_MASK;
*cmd |= be32_to_cpu(payload_len + length);
memcpy(((uint8_t *)cmd) + length, lp,
payload_len);
} else {
vport->fc_rscn_id_list[rscn_cnt] = pcmd;
vport->fc_rscn_id_cnt++;
/* If we zero cmdiocb->context2, the calling
* routine will not try to free it.
*/
cmdiocb->context2 = NULL;
}
/* Deferred RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0235 Deferred RSCN "
"%d (%d):0235 Deferred RSCN "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->fc_rscn_id_cnt,
vport->fc_flag,
phba->brd_no, vport->vpi,
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
} else {
spin_lock_irq(shost->host_lock);
......@@ -2530,10 +2782,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
/* ReDiscovery RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0234 ReDiscovery RSCN "
"%d (%d):0234 ReDiscovery RSCN "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->fc_rscn_id_cnt,
vport->fc_flag,
phba->brd_no, vport->vpi,
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
}
/* Send back ACC */
......@@ -2542,6 +2794,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
vport->fc_flag &= ~FC_RSCN_DEFERRED;
return 0;
}
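When an RSCN arrives while a previous one is still being worked on, the hunk above does not discard it: the new payload is appended onto the most recently deferred buffer if the combined length still fits in one buffer, and otherwise parked as a fresh fc_rscn_id_list entry, bounded by FC_MAX_HOLD_RSCN. A rough user-space sketch of that coalescing decision follows; BUF_SIZE and MAX_HELD are placeholder values standing in for LPFC_BPL_SIZE and FC_MAX_HOLD_RSCN, and the helper name is invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 1024	/* stand-in for LPFC_BPL_SIZE */
#define MAX_HELD 32	/* stand-in for FC_MAX_HOLD_RSCN */

struct held_rscn {
	uint8_t  data[BUF_SIZE];
	uint32_t len;		/* payload bytes currently stored */
};

static struct held_rscn held[MAX_HELD];
static unsigned int held_cnt;

/* Returns 0 when the payload was saved, -1 when the deferral list is full. */
static int defer_rscn(const uint8_t *payload, uint32_t len)
{
	if (held_cnt && held[held_cnt - 1].len + len <= BUF_SIZE) {
		/* Coalesce into the most recently deferred buffer. */
		struct held_rscn *h = &held[held_cnt - 1];

		memcpy(h->data + h->len, payload, len);
		h->len += len;
		return 0;
	}
	if (held_cnt >= MAX_HELD)
		return -1;	/* caller would fall back to full rediscovery */
	memcpy(held[held_cnt].data, payload, len);
	held[held_cnt].len = len;
	held_cnt++;
	return 0;
}

int main(void)
{
	uint8_t page[8] = { 0 };

	defer_rscn(page, sizeof(page));	/* first payload: new slot */
	defer_rscn(page, sizeof(page));	/* second payload: coalesced into it */
	printf("held buffers: %u, bytes in first: %u\n", held_cnt, held[0].len);
	return 0;
}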
......@@ -2572,13 +2825,19 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
struct lpfc_hba *phba = vport->phba;
/* Ignore RSCN if the port is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
lpfc_els_flush_rscn(vport);
return 0;
}
/* Start timer for RSCN processing */
lpfc_set_disctmo(vport);
/* RSCN processed */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
phba->brd_no,
"%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
vport->port_state);
......@@ -2587,7 +2846,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT) == 0)
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
/* Wait for NameServer query cmpl before we can
continue */
return 1;
......@@ -2649,9 +2908,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d:0113 An FLOGI ELS command x%x was received "
"from DID x%x in Loop Mode\n",
phba->brd_no, cmd, did);
"%d (%d):0113 An FLOGI ELS command x%x was "
"received from DID x%x in Loop Mode\n",
phba->brd_no, vport->vpi, cmd, did);
return 1;
}
......@@ -2663,7 +2922,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*/
rc = memcmp(&vport->fc_portname, &sp->portName,
sizeof (struct lpfc_name));
sizeof(struct lpfc_name));
if (!rc) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
......@@ -2802,7 +3061,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint32_t); /* Skip past command */
pcmd += sizeof(uint32_t); /* Skip past command */
rps_rsp = (RPS_RSP *)pcmd;
if (phba->fc_topology != TOPOLOGY_LOOP)
......@@ -2823,9 +3082,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
phba->brd_no, elsiocb->iotag,
"%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
......@@ -2865,14 +3125,17 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((flag == 0) ||
((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
sizeof (struct lpfc_name)) == 0))) {
sizeof(struct lpfc_name)) == 0))) {
printk("Fix me....\n");
dump_stack();
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->context1 =
(void *)((unsigned long)cmdiocb->iocb.ulpContext);
(void *)((unsigned long) cmdiocb->iocb.ulpContext);
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
if (lpfc_sli_issue_mbox (phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
......@@ -2915,7 +3178,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof (uint16_t);
pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
pcmd += sizeof(uint16_t);
......@@ -2932,9 +3195,10 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
/* Xmit ELS RPL ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
phba->brd_no, elsiocb->iotag,
"%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
......@@ -3008,8 +3272,8 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* FARP-REQ received from DID <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0601 FARP-REQ received from DID x%x\n",
phba->brd_no, did);
"%d (%d):0601 FARP-REQ received from DID x%x\n",
phba->brd_no, vport->vpi, did);
/* We will only support match on WWPN or WWNN */
if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
......@@ -3020,14 +3284,14 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* If this FARP command is searching for my portname */
if (fp->Mflags & FARP_MATCH_PORT) {
if (memcmp(&fp->RportName, &vport->fc_portname,
sizeof (struct lpfc_name)) == 0)
sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
/* If this FARP command is searching for my nodename */
if (fp->Mflags & FARP_MATCH_NODE) {
if (memcmp(&fp->RnodeName, &vport->fc_nodename,
sizeof (struct lpfc_name)) == 0)
sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
......@@ -3068,8 +3332,8 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
cmd = *lp++;
/* FARP-RSP received from DID <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0600 FARP-RSP received from DID x%x\n",
phba->brd_no, did);
"%d (%d):0600 FARP-RSP received from DID x%x\n",
phba->brd_no, vport->vpi, did);
/* ACCEPT the Farp resp request */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
......@@ -3090,8 +3354,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* FAN received */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0265 FAN received\n",
phba->brd_no);
"%d (%d):0265 FAN received\n",
phba->brd_no, vport->vpi);
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
......@@ -3099,7 +3363,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lp = (uint32_t *)pcmd->virt;
cmd = *lp++;
fp = (FAN *)lp;
fp = (FAN *) lp;
/* FAN received; Fan does not have a reply sequence */
......@@ -3178,10 +3442,15 @@ lpfc_els_timeout(unsigned long ptr)
spin_lock_irqsave(&vport->work_port_lock, iflag);
if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
else
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
return;
}
......@@ -3221,17 +3490,19 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (pcmd)
els_command = *(uint32_t *) (pcmd->virt);
if ((els_command == ELS_CMD_FARP)
|| (els_command == ELS_CMD_FARPR)) {
if (els_command == ELS_CMD_FARP ||
els_command == ELS_CMD_FARPR ||
els_command == ELS_CMD_FDISC)
continue;
if (vport != piocb->vport)
continue;
}
if (piocb->drvrTimeout > 0) {
if (piocb->drvrTimeout >= timeout) {
if (piocb->drvrTimeout >= timeout)
piocb->drvrTimeout -= timeout;
} else {
else
piocb->drvrTimeout = 0;
}
continue;
}
......@@ -3245,11 +3516,10 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
remote_ID = ndlp->nlp_DID;
}
lpfc_printf_log(phba,
KERN_ERR,
LOG_ELS,
"%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
phba->brd_no, els_command,
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0127 ELS timeout Data: x%x x%x x%x "
"x%x\n",
phba->brd_no, vport->vpi, els_command,
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
......@@ -3268,6 +3538,11 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
struct lpfc_dmabuf *pcmd;
uint32_t *elscmd;
uint32_t els_command;
lpfc_fabric_abort_vport(vport);
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
......@@ -3284,6 +3559,10 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
cmd->ulpCommand == CMD_ABORT_XRI_CN)
continue;
pcmd = (struct lpfc_dmabuf *) piocb->context2;
elscmd = (uint32_t *) (pcmd->virt);
els_command = *elscmd;
if (piocb->vport != vport)
continue;
......@@ -3306,7 +3585,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &piocb->iocb;
list_del(&piocb->list);
list_del_init(&piocb->list);
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
......@@ -3322,21 +3601,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_dmabuf *mp,
struct lpfc_iocbq *elsiocb)
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *lp;
uint32_t *payload;
uint32_t cmd, did, newnode, rjt_err = 0;
IOCB_t *icmd = &elsiocb->iocb;
if (!vport || !mp)
if (vport == NULL || elsiocb->context2 == NULL)
goto dropit;
newnode = 0;
lp = (uint32_t *) mp->virt;
cmd = *lp++;
payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
lpfc_post_buffer(phba, pring, 1, 1);
......@@ -3347,6 +3625,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (lpfc_els_chk_latt(vport))
goto dropit;
/* Ignore traffic received during vport shutdown. */
if (vport->load_flag & FC_UNLOADING)
goto dropit;
did = icmd->un.rcvels.remoteID;
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
......@@ -3367,7 +3649,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (elsiocb->context1)
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->context2 = mp;
elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
......@@ -3375,18 +3656,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0112 ELS command x%x received from NPORT x%x "
"Data: x%x\n", phba->brd_no, cmd, did,
"%d (%d):0112 ELS command x%x received from NPORT x%x "
"Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
vport->port_state);
switch (cmd) {
case ELS_CMD_PLOGI:
phba->fc_stat.elsRcvPLOGI++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = 1;
if ((vport->port_state < LPFC_DISC_AUTH) ||
((vport->port_type == LPFC_NPIV_PORT &&
phba->cfg_vport_restrict_login))) {
rjt_err = 2;
break;
}
ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
break;
......@@ -3482,13 +3765,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
break;
default:
/* Unsupported ELS command, reject */
rjt_err = 1;
rjt_err = 2;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d:0115 Unknown ELS command x%x "
"%d (%d):0115 Unknown ELS command x%x "
"received from NPORT x%x\n",
phba->brd_no, cmd, did);
phba->brd_no, vport->vpi, cmd, did);
if (newnode)
lpfc_drop_node(vport, ndlp);
break;
......@@ -3496,96 +3779,742 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* check if need to LS_RJT received ELS cmd */
if (rjt_err) {
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
memset(&stat, 0, sizeof(stat));
if (rjt_err == 1)
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
else
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
stat.un.b.vendorUnique = 0;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp);
if (newnode)
lpfc_drop_node(vport, ndlp);
}
return;
dropit:
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d:0111 Dropping received ELS cmd "
"%d (%d):0111 Dropping received ELS cmd "
"Data: x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport ? vport->vpi : 0xffff,
icmd->ulpStatus, icmd->un.ulpWord[4],
icmd->ulpTimeout);
phba->fc_stat.elsRcvDrop++;
}
static struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->vpi == vpi)
return vport;
}
return NULL;
}
void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp = NULL;
IOCB_t *icmd = &elsiocb->iocb;
struct hbq_dmabuf *sp = NULL;
dma_addr_t paddr;
struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
elsiocb->context2 = NULL;
elsiocb->context3 = NULL;
if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
(icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_hbqbuf_fill_hbq(phba);
else
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba, pring, 0, 1);
return;
}
/* If there are no BDEs associated with this IOCB,
* there is nothing to do.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
if (icmd->unsli3.rcvsli3.vpi == 0xffff)
vport = phba->pport;
else {
uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
vport = lpfc_find_vport_by_vpid(phba, vpi);
}
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
if (icmd->ulpBdeCount == 0)
return;
/* type of ELS cmd is first 32bit word in packet */
/* type of ELS cmd is first 32bit word
* in packet
*/
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[3]);
if (sp)
phba->hbq_buff_count--;
mp = sp ? &sp->dbuf : NULL;
elsiocb->context2 = bdeBuf1;
} else {
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
}
lpfc_els_unsol_buffer(phba, pring, vport, mp, elsiocb);
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
* if they are done with "mp" by setting context2 to NULL.
*/
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
if (elsiocb->context2) {
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_free_hbq(phba, sp);
else {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
elsiocb->context2 = NULL;
}
/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) != 0 &&
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
icmd->ulpBdeCount == 2) {
sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[15]);
if (sp)
phba->hbq_buff_count--;
mp = sp ? &sp->dbuf : NULL;
lpfc_els_unsol_buffer(phba, pring, vport, mp, elsiocb);
elsiocb->context2 = bdeBuf2;
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/* free mp if we are done with it */
if (elsiocb->context2) {
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_free_hbq(phba, sp);
else {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
lpfc_in_buf_free(phba, elsiocb->context2);
elsiocb->context2 = NULL;
}
}
}
void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp) {
if (phba->fc_topology == TOPOLOGY_LOOP) {
lpfc_disc_start(vport);
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0251 NameServer login: no memory\n",
phba->brd_no, vport->vpi);
return;
}
lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
}
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0252 Cannot issue NameServer login\n",
phba->brd_no, vport->vpi);
return;
}
if (phba->cfg_fdmi_on) {
ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
GFP_KERNEL);
if (ndlp_fdmi) {
lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
ndlp_fdmi->nlp_type |= NLP_FABRIC;
ndlp_fdmi->nlp_state =
NLP_STE_PLOGI_ISSUE;
lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
0);
}
}
return;
}
static void
lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
MAILBOX_t *mb = &pmb->mb;
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
lpfc_nlp_put(ndlp);
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0915 Register VPI failed: 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
switch (mb->mbxStatus) {
case 0x11: /* unsupported feature */
case 0x9603: /* max_vpi exceeded */
/* giving up on vport registration */
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
break;
default:
/* Try to recover from this error */
lpfc_mbx_unreg_vpi(vport);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
lpfc_initial_fdisc(vport);
break;
}
} else {
if (vport == phba->pport)
lpfc_issue_fabric_reglogin(vport);
else
lpfc_do_scr_ns_plogi(phba, vport);
}
mempool_free(pmb, phba->mbox_mem_pool);
return;
}
void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
LPFC_MBOXQ_t *mbox;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
if (lpfc_sli_issue_mbox(phba, mbox,
MBX_NOWAIT | MBX_STOP_IOCB)
== MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0253 Register VPI: Cannot send mbox\n",
phba->brd_no, vport->vpi);
}
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0254 Register VPI: no memory\n",
phba->brd_no, vport->vpi);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
lpfc_nlp_put(ndlp);
}
}
static void
lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
/* Since all FDISCs are being single threaded, we
* must reset the discovery timer for ALL vports
* waiting to send FDISC when one completes.
*/
list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
lpfc_set_disctmo(piocb->vport);
}
if (irsp->ulpStatus) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
/* FDISC failed */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0124 FDISC failed. (%d/%d)\n",
phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4]);
if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_nlp_put(ndlp);
/* giving up on FDISC. Cancel discovery timer */
lpfc_can_disctmo(vport);
} else {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
if ((vport->fc_prevDID != vport->fc_myDID) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
* issue unreg_vpi.
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
if (np->nlp_state != NLP_STE_NPR_NODE
|| !(np->nlp_flag & NLP_NPR_ADISC))
continue;
spin_lock_irq(shost->host_lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_mbx_unreg_vpi(vport);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
}
if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
}
out:
lpfc_els_free_iocb(phba, cmdiocb);
}
int
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct serv_parm *sp;
uint8_t *pcmd;
uint16_t cmdsize;
int did = ndlp->nlp_DID;
int rc;
int new_ndlp = 0;
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_FDISC);
if (!elsiocb) {
if (new_ndlp)
mempool_free(ndlp, phba->nlp_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0255 Issue FDISC: no IOCB\n",
phba->brd_no, vport->vpi);
return 1;
}
icmd = &elsiocb->iocb;
icmd->un.elsreq64.myID = 0;
icmd->un.elsreq64.fl = 1;
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
pcmd += sizeof(uint32_t); /* CSP Word 1 */
memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
pcmd += sizeof(uint32_t); /* CSP Word 2 */
pcmd += sizeof(uint32_t); /* CSP Word 3 */
pcmd += sizeof(uint32_t); /* CSP Word 4 */
pcmd += sizeof(uint32_t); /* Port Name */
memcpy(pcmd, &vport->fc_portname, 8);
pcmd += sizeof(uint32_t); /* Node Name */
pcmd += sizeof(uint32_t); /* Node Name */
memcpy(pcmd, &vport->fc_nodename, 8);
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
if (new_ndlp)
mempool_free(ndlp, phba->nlp_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
phba->brd_no, vport->vpi);
return 1;
}
lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
vport->port_state = LPFC_FDISC;
return 0;
}
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
lpfc_els_free_iocb(phba, cmdiocb);
vport->unreg_vpi_cmpl = VPORT_ERROR;
}
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
ELS_CMD_LOGO);
if (!elsiocb)
return 1;
icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
/* Fill in LOGO payload */
*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
return 0;
}
void
lpfc_fabric_block_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflags;
uint32_t tmo_posted;
spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
if (!tmo_posted) {
spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->work_wait)
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
}
static void
lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
{
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *cmd;
repeat:
iocb = NULL;
spin_lock_irqsave(&phba->hbalock, iflags);
/* Post any pending iocb to the SLI layer */
if (atomic_read(&phba->fabric_iocb_count) == 0) {
list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
list);
if (iocb)
atomic_inc(&phba->fabric_iocb_count);
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb) {
iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
iocb->iocb_flag |= LPFC_IO_FABRIC;
ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
iocb->fabric_iocb_cmpl = NULL;
iocb->iocb_flag &= ~LPFC_IO_FABRIC;
cmd = &iocb->iocb;
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
iocb->iocb_cmpl(phba, iocb, iocb);
atomic_dec(&phba->fabric_iocb_count);
goto repeat;
}
}
return;
}
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
lpfc_resume_fabric_iocbs(phba);
return;
}
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
int blocked;
blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
/* Start a timer to unblock fabric
* iocbs after 100ms
*/
if (!blocked)
mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
return;
}
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct ls_rjt stat;
if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
BUG();
switch (rspiocb->iocb.ulpStatus) {
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
lpfc_block_fabric_iocbs(phba);
}
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
lpfc_block_fabric_iocbs(phba);
break;
case IOSTAT_LS_RJT:
stat.un.lsRjtError =
be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
lpfc_block_fabric_iocbs(phba);
break;
}
if (atomic_read(&phba->fabric_iocb_count) == 0)
BUG();
cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
cmdiocb->fabric_iocb_cmpl = NULL;
cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
atomic_dec(&phba->fabric_iocb_count);
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
/* Post any pending iocbs to HBA */
lpfc_resume_fabric_iocbs(phba);
}
}
int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
unsigned long iflags;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
int ready;
int ret;
if (atomic_read(&phba->fabric_iocb_count) > 1)
BUG();
spin_lock_irqsave(&phba->hbalock, iflags);
ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
iocb->iocb_flag |= LPFC_IO_FABRIC;
atomic_inc(&phba->fabric_iocb_count);
ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
iocb->fabric_iocb_cmpl = NULL;
iocb->iocb_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
} else {
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&iocb->list, &phba->fabric_iocb_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
ret = IOCB_SUCCESS;
}
return ret;
}
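lpfc_issue_fabric_iocb and lpfc_cmpl_fabric_iocb above serialize fabric-directed ELS traffic: at most one such command is outstanding at a time (fabric_iocb_count), later ones wait on fabric_iocb_list, and a busy or reject completion sets FABRIC_COMANDS_BLOCKED and arms a 100 ms timer before the next deferred command is resumed. The pthread sketch below shows only that gating idea; every name in it is invented, and the timer, hbalock, and SLI issue path are deliberately left out.

#include <pthread.h>
#include <stdio.h>

/* Illustrative single-outstanding-command gate; not driver code. */
struct gate {
	pthread_mutex_t lock;
	int outstanding;	/* mirrors fabric_iocb_count (0 or 1) */
	int blocked;		/* mirrors FABRIC_COMANDS_BLOCKED */
	int queued;		/* stand-in for the deferred iocb list */
};

static int gate_try_issue(struct gate *g)
{
	int issue_now;

	pthread_mutex_lock(&g->lock);
	issue_now = (g->outstanding == 0 && !g->blocked);
	if (issue_now)
		g->outstanding = 1;
	else
		g->queued++;		/* park it, like fabric_iocb_list */
	pthread_mutex_unlock(&g->lock);
	return issue_now;
}

static void gate_complete(struct gate *g, int was_busy)
{
	pthread_mutex_lock(&g->lock);
	g->outstanding = 0;
	if (was_busy) {
		g->blocked = 1;		/* real driver also arms a 100 ms unblock timer */
	} else if (g->queued) {
		g->queued--;		/* resume the next deferred command */
		g->outstanding = 1;
	}
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct gate g = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

	printf("first issued now: %d\n", gate_try_issue(&g));
	printf("second issued now: %d\n", gate_try_issue(&g));	/* deferred */
	gate_complete(&g, 0);		/* completion resumes the deferred one */
	return 0;
}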
void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
if (piocb->vport != vport)
continue;
list_move_tail(&piocb->list, &completions);
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&piocb->list);
cmd = &piocb->iocb;
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_hba *phba = ndlp->vport->phba;
struct lpfc_iocbq *tmp_iocb, *piocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *cmd;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
list_move_tail(&piocb->list, &completions);
}
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&piocb->list);
cmd = &piocb->iocb;
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_iocbq *piocb;
IOCB_t *cmd;
spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->fabric_iocb_list, &completions);
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&piocb->list);
cmd = &piocb->iocb;
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd;
struct lpfc_nodelist *ndlp;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
cmd = &piocb->iocb;
ndlp = (struct lpfc_nodelist *) piocb->context1;
if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
ndlp != NULL &&
ndlp->nlp_DID == Fabric_DID)
list_move_tail(&piocb->list, &completions);
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&piocb->list);
cmd = &piocb->iocb;
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
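The four lpfc_fabric_abort_* helpers above share one idiom: while hbalock is held, matching entries are only moved from fabric_iocb_list onto a private completions list; the lock is then dropped and each entry is completed with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, so completion callbacks never run under the spinlock. A standalone sketch of that move-then-complete pattern follows; the request type, callback, and list handling here are invented stand-ins, and this toy version does not preserve completion order.

#include <pthread.h>
#include <stdio.h>

struct req {
	struct req *next;
	int tag;
	void (*done)(struct req *);	/* analogous to iocb_cmpl */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *pending;		/* stand-in for fabric_iocb_list */

static void abort_matching(int tag)
{
	struct req *completions = NULL, **pp, *r;

	pthread_mutex_lock(&list_lock);
	for (pp = &pending; (r = *pp) != NULL; ) {
		if (r->tag == tag) {
			*pp = r->next;			/* unlink under the lock */
			r->next = completions;		/* collect privately */
			completions = r;
		} else {
			pp = &r->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while (completions) {				/* complete with no lock held */
		r = completions;
		completions = r->next;
		r->done(r);
	}
}

static void say_aborted(struct req *r)
{
	printf("aborted %d\n", r->tag);
}

int main(void)
{
	struct req a = { NULL, 1, say_aborted };

	pending = &a;
	abort_matching(1);
	return 0;
}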
......@@ -36,6 +36,7 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
......@@ -96,50 +97,68 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
int warn_on = 0;
struct lpfc_hba *phba;
struct lpfc_vport *vport;
int put_node;
int put_rport;
rdata = rport->dd_data;
ndlp = rdata->pnode;
if (!ndlp) {
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
if (rport->scsi_target_id != -1) {
printk(KERN_ERR "Cannot find remote node"
" for rport in dev_loss_tmo_callbk x%x\n",
rport->port_id);
" for rport in dev_loss_tmo_callbk x%x\n",
rport->port_id);
}
return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
if (ndlp->nlp_type & NLP_FABRIC) {
/* We will clean up these Nodes in linkup */
put_node = rdata->pnode != NULL;
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
return;
}
name = (uint8_t *)&ndlp->nlp_portname;
vport = ndlp->vport;
phba = vport->phba;
if (!(vport->load_flag & FC_UNLOADING) &&
ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
if (vport->load_flag & FC_UNLOADING)
warn_on = 0;
if (warn_on) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0203 Devloss timeout on "
"%d (%d):0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0204 Devloss timeout on "
"%d (%d):0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
......@@ -152,12 +171,23 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
else {
put_node = rdata->pnode != NULL;
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
lpfc_nlp_put(ndlp);
put_device(&rport->dev);
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
}
return;
}
void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
wake_up(phba->work_wait);
return;
}
......@@ -166,6 +196,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
{
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
struct lpfc_vport *vport;
int free_evt;
spin_lock_irq(&phba->hbalock);
......@@ -175,10 +206,23 @@ lpfc_work_list_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_DEV_LOSS:
free_evt = 0; /* evt is part of ndlp */
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
vport = ndlp->vport;
if (!vport)
break;
if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
break;
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
lpfc_els_retry_delay_handler(ndlp);
free_evt = 0;
free_evt = 0; /* evt is part of ndlp */
break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
......@@ -250,24 +294,43 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
vport = phba->pport;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
work_port_events = vport->work_port_events;
if (!scsi_host_get(shost)) {
continue;
}
spin_unlock_irq(&phba->hbalock);
work_port_events = vport->work_port_events;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
lpfc_els_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
lpfc_els_timeout_handler(vport);
if (work_port_events & WORKER_MBOX_TMO)
lpfc_mbox_timeout_handler(phba);
if (work_port_events & WORKER_MBOX_TMO)
lpfc_mbox_timeout_handler(phba);
if (work_port_events & WORKER_FDMI_TMO)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
lpfc_unblock_fabric_iocbs(phba);
spin_lock_irq(&phba->hbalock);
vport->work_port_events &= ~work_port_events;
if (work_port_events & WORKER_FDMI_TMO)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
spin_lock_irq(&vport->work_port_lock);
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
scsi_host_put(shost);
spin_lock_irq(&phba->hbalock);
}
spin_unlock_irq(&phba->hbalock);
for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
......@@ -300,24 +363,41 @@ lpfc_work_done(struct lpfc_hba *phba)
static int
check_work_wait_done(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
int rc = 0;
if (!vport)
return 0;
struct lpfc_vport *vport;
struct lpfc_sli_ring *pring;
int i, rc = 0;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->work_port_events) {
rc = 1;
goto exit;
}
}
if (phba->work_ha ||
vport->work_port_events ||
(!list_empty(&phba->work_list)) ||
kthread_should_stop())
if (phba->work_ha || (!list_empty(&phba->work_list)) ||
kthread_should_stop()) {
rc = 1;
goto exit;
}
for (i = 0; i < phba->sli.num_rings; i++) {
pring = &phba->sli.ring[i];
if (pring->flag & LPFC_DEFERRED_RING_EVENT) {
rc = 1;
goto exit;
}
}
exit:
if (rc)
phba->work_found++;
else
phba->work_found = 0;
spin_unlock_irq(&phba->hbalock);
return rc;
}
int
lpfc_do_work(void *p)
{
......@@ -327,11 +407,13 @@ lpfc_do_work(void *p)
set_user_nice(current, -20);
phba->work_wait = &work_waitq;
phba->work_found = 0;
while (1) {
rc = wait_event_interruptible(work_waitq,
check_work_wait_done(phba));
check_work_wait_done(phba));
BUG_ON(rc);
if (kthread_should_stop())
......@@ -339,6 +421,17 @@ lpfc_do_work(void *p)
lpfc_work_done(phba);
/* If there is a lot of slow ring work, like during link up
* check_work_wait_done() may cause this thread to not give
* up the CPU for very long periods of time. This may cause
* soft lockups or other problems. To avoid these situations
* give up the CPU here after LPFC_MAX_WORKER_ITERATION
* consecutive iterations.
*/
if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
phba->work_found = 0;
schedule();
}
}
phba->work_wait = NULL;
return 0;
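The new work_found counter in the hunks above throttles the worker: check_work_wait_done() bumps it on every poll that finds work and clears it on an idle poll, and lpfc_do_work schedules away once LPFC_MAX_WORKER_ITERATION consecutive busy polls have gone by, so slow-ring storms cannot pin a CPU. The toy loop below shows only that throttling shape; the iteration cap and helper names are made up for this note.

#include <sched.h>
#include <stdio.h>

#define MAX_ITERATIONS 4	/* stand-in for LPFC_MAX_WORKER_ITERATION */

static int work_found;		/* consecutive busy polls, like phba->work_found */

static int have_work(int poll)
{
	return poll < 6;	/* pretend the first six polls find work */
}

int main(void)
{
	for (int poll = 0; poll < 10; poll++) {
		if (!have_work(poll)) {
			work_found = 0;		/* idle poll resets the streak */
			continue;
		}
		/* ...process one batch of work here... */
		if (++work_found >= MAX_ITERATIONS) {
			work_found = 0;
			sched_yield();		/* analogous to schedule() */
			printf("yielded after poll %d\n", poll);
		}
	}
	return 0;
}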
......@@ -360,7 +453,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
* be queued to worker thread for processing
*/
evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
if (!evtp)
return 0;
......@@ -371,37 +464,94 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
int rc;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
if (!remove && ndlp->nlp_type & NLP_FABRIC)
continue;
rc = lpfc_disc_state_machine(vport, ndlp, NULL,
remove
? NLP_EVT_DEVICE_RM
: NLP_EVT_DEVICE_RECOVERY);
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
lpfc_mbx_unreg_vpi(vport);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
}
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
/* Cleanup any outstanding RSCN activity */
lpfc_els_flush_rscn(vport);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
lpfc_cleanup_rpis(vport, 0);
/* free any ndlp's on unused list */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
/* free any ndlp's in unused state */
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
lpfc_drop_node(vport, ndlp);
/* Turn off discovery timer if it's running */
lpfc_can_disctmo(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport *port_iterator;
LPFC_MBOXQ_t *mb;
int rc;
psli = &phba->sli;
if (phba->link_state == LPFC_LINK_DOWN) {
return 0;
}
spin_lock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN)
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
phba->pport->fc_flag &= ~FC_LBIT;
}
spin_unlock_irq(&phba->hbalock);
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(port_iterator);
}
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_unreg_did(phba, 0xffffffff, mb);
lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
......@@ -410,31 +560,13 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
/* Cleanup any outstanding RSCN activity */
lpfc_els_flush_rscn(vport);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
/*
* Issue a LINK DOWN event to all nodes.
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
/* free any ndlp's on unused state */
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
lpfc_drop_node(vport, ndlp);
else /* otherwise, force node recovery. */
rc = lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
}
/* Setup myDID for link up if we are in pt2pt mode */
if (vport->fc_flag & FC_PT2PT) {
vport->fc_myDID = 0;
if (phba->pport->fc_flag & FC_PT2PT) {
phba->pport->fc_myDID = 0;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mb->vport = vport;
if (lpfc_sli_issue_mbox(phba, mb,
(MBX_NOWAIT | MBX_STOP_IOCB))
......@@ -443,66 +575,88 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
spin_unlock_irq(shost->host_lock);
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LBIT;
spin_unlock_irq(shost->host_lock);
return 0;
}
/* Turn off discovery timer if it's running */
lpfc_can_disctmo(vport);
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
/* Must process IOCBs on all rings to handle ABORTed I/Os */
return 0;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
/* On Linkup it's safe to clean up the ndlp
* from Fabric connections.
*/
if (ndlp->nlp_DID != Fabric_DID)
lpfc_unreg_rpi(vport, ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding IO now since device is
* marked for PLOGI.
*/
lpfc_unreg_rpi(vport, ndlp);
}
}
}
static int
lpfc_linkup(struct lpfc_hba *phba)
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_hba *phba = vport->phba;
if ((vport->load_flag & FC_UNLOADING) != 0)
return;
/* If NPIV is not enabled, only bring the physical port up */
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(vport != phba->pport))
return;
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
phba->link_state = LPFC_LINK_UP;
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
if (vport->fc_flag & FC_LBIT)
lpfc_linkup_cleanup_nodes(vport);
if (vport->fc_flag & FC_LBIT) {
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
if (ndlp->nlp_type & NLP_FABRIC) {
/*
* On Linkup it's safe to clean up the
* ndlp from Fabric connections.
*/
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_UNUSED_NODE);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/*
* Fail outstanding IO now since
* device is marked for PLOGI.
*/
lpfc_unreg_rpi(vport, ndlp);
}
}
}
}
/* free any ndlp's in unused state */
/* free any ndlp's in unused state */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
nlp_listp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
lpfc_drop_node(vport, ndlp);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
phba->link_state = LPFC_LINK_UP;
/* Unblock fabric iocbs if they are blocked */
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
del_timer_sync(&phba->fabric_block_timer);
list_for_each_entry(vport, &phba->port_list, listentry) {
lpfc_linkup_port(vport);
}
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_clear_la(phba, phba->pport);
return 0;
}
......@@ -529,18 +683,28 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <port_state> */
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0320 CLEAR_LA mbxStatus error x%x hba "
"%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
phba->brd_no, mb->mbxStatus, vport->port_state);
phba->brd_no, vport->vpi, mb->mbxStatus,
vport->port_state);
phba->link_state = LPFC_HBA_ERROR;
goto out;
}
if (vport->fc_flag & FC_ABORT_DISCOVERY)
goto out;
if (vport->port_type == LPFC_PHYSICAL_PORT)
phba->link_state = LPFC_HBA_READY;
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irq(&phba->hbalock);
return;
vport->num_disc_nodes = 0;
/* go thru NPR nodes and issue ELS PLOGIs */
......@@ -558,8 +722,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
out:
/* Device Discovery completes */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0225 Device Discovery completes\n",
phba->brd_no);
"%d (%d):0225 Device Discovery completes\n",
phba->brd_no, vport->vpi);
mempool_free(pmb, phba->mbox_mem_pool);
......@@ -589,8 +753,6 @@ static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_sli *psli = &phba->sli;
int rc;
if (pmb->mb.mbxStatus)
goto out;
......@@ -606,49 +768,40 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
lpfc_set_disctmo(vport);
return;
}
}
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
vport->port_state = LPFC_FLOGI;
lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
if (vport->port_state != LPFC_FLOGI) {
vport->port_state = LPFC_FLOGI;
lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
}
return;
out:
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0306 CONFIG_LINK mbxStatus error x%x "
"%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
phba->brd_no, pmb->mb.mbxStatus, vport->port_state);
phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
vport->port_state);
lpfc_linkdown(phba);
mempool_free(pmb, phba->mbox_mem_pool);
phba->link_state = LPFC_HBA_ERROR;
lpfc_linkdown(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0200 CONFIG_LINK bad hba state x%x\n",
phba->brd_no, vport->port_state);
"%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
phba->brd_no, vport->vpi, vport->port_state);
lpfc_clear_la(phba, pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(vport);
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
vport->port_state = LPFC_VPORT_READY;
}
lpfc_issue_clear_la(phba, vport);
return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
struct lpfc_vport *vport = pmb->vport;
......@@ -658,12 +811,12 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0319 READ_SPARAM mbxStatus error x%x "
"%d (%d):0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
phba->brd_no, mb->mbxStatus, vport->port_state);
phba->brd_no, vport->vpi, mb->mbxStatus,
vport->port_state);
lpfc_linkdown(phba);
phba->link_state = LPFC_HBA_ERROR;
goto out;
}
......@@ -675,12 +828,15 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn,
vport->fc_sparam.portName.u.wwn);
memcpy((uint8_t *) &vport->fc_nodename,
(uint8_t *) &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
memcpy((uint8_t *) &vport->fc_portname,
(uint8_t *) &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(vport->fc_nodename));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof(vport->fc_portname));
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
......@@ -690,35 +846,15 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
if (phba->link_state != LPFC_CLEAR_LA) {
struct lpfc_sli_ring *extra_ring =
&psli->ring[psli->extra_ring];
struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
lpfc_clear_la(phba, pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
== MBX_NOT_FINISHED) {
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(vport);
extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
vport->port_state = LPFC_VPORT_READY;
}
} else {
mempool_free(pmb, phba->mbox_mem_pool);
}
lpfc_issue_clear_la(phba, vport);
mempool_free(pmb, phba->mbox_mem_pool);
return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
int i;
struct lpfc_dmabuf *mp;
......@@ -727,30 +863,32 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
spin_lock_irq(shost->host_lock);
spin_lock_irq(&phba->hbalock);
switch (la->UlnkSpeed) {
case LA_1GHZ_LINK:
phba->fc_linkspeed = LA_1GHZ_LINK;
break;
case LA_2GHZ_LINK:
phba->fc_linkspeed = LA_2GHZ_LINK;
break;
case LA_4GHZ_LINK:
phba->fc_linkspeed = LA_4GHZ_LINK;
break;
case LA_8GHZ_LINK:
phba->fc_linkspeed = LA_8GHZ_LINK;
break;
default:
phba->fc_linkspeed = LA_UNKNW_LINK;
break;
case LA_1GHZ_LINK:
phba->fc_linkspeed = LA_1GHZ_LINK;
break;
case LA_2GHZ_LINK:
phba->fc_linkspeed = LA_2GHZ_LINK;
break;
case LA_4GHZ_LINK:
phba->fc_linkspeed = LA_4GHZ_LINK;
break;
case LA_8GHZ_LINK:
phba->fc_linkspeed = LA_8GHZ_LINK;
break;
default:
phba->fc_linkspeed = LA_UNKNW_LINK;
break;
}
phba->fc_topology = la->topology;
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
if (phba->fc_topology == TOPOLOGY_LOOP) {
/* Get Loop Map information */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
/* Get Loop Map information */
if (la->il)
vport->fc_flag |= FC_LBIT;
......@@ -784,30 +922,35 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
}
/* Link Up Event ALPA map */
lpfc_printf_log(phba,
KERN_WARNING,
LOG_LINK_EVENT,
"%d:1304 Link Up Event "
"ALPA map Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
un.pa.wd1, un.pa.wd2,
un.pa.wd3, un.pa.wd4);
KERN_WARNING,
LOG_LINK_EVENT,
"%d:1304 Link Up Event "
"ALPA map Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
un.pa.wd1, un.pa.wd2,
un.pa.wd3, un.pa.wd4);
}
}
}
} else {
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if (phba->max_vpi && lpfc_npiv_enable &&
(phba->sli_rev == 3))
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
vport->fc_flag |= FC_LBIT;
}
spin_unlock_irq(shost->host_lock);
spin_unlock_irq(&phba->hbalock);
lpfc_linkup(phba);
if (sparam_mbox) {
lpfc_read_sparam(phba, sparam_mbox);
lpfc_read_sparam(phba, sparam_mbox, 0);
sparam_mbox->vport = vport;
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
......@@ -815,7 +958,7 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
mempool_free(sparam_mbox, phba->mbox_mem_pool);
if (cfglink_mbox)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
return;
goto out;
}
}
......@@ -825,10 +968,20 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
cfglink_mbox->vport = vport;
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc != MBX_NOT_FINISHED)
return;
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
}
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
phba->brd_no, vport->vpi,
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
}
static void
......@@ -886,12 +1039,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
if (((phba->fc_eventTag + 1) < la->eventTag) ||
(phba->fc_eventTag == la->eventTag)) {
(phba->fc_eventTag == la->eventTag)) {
phba->fc_stat.LinkMultiEvent++;
if (la->attType == AT_LINK_UP)
if (phba->fc_eventTag != 0)
lpfc_linkdown(phba);
}
}
phba->fc_eventTag = la->eventTag;
......@@ -912,7 +1065,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
}
lpfc_mbx_process_link_up(vport, la);
lpfc_mbx_process_link_up(phba, la);
} else {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
......@@ -940,7 +1093,7 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
......@@ -955,6 +1108,100 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
switch (mb->mbxStatus) {
case 0x0011:
case 0x0020:
case 0x9700:
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0911 cmpl_unreg_vpi, "
"mb status = 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
break;
default:
phba->vpi_cnt--;
}
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
*/
if (vport->load_flag & FC_UNLOADING)
scsi_host_put(shost);
}
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return;
lpfc_unreg_vpi(phba, vport->vpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"%d (%d):1800 Could not issue unreg_vpi\n",
phba->brd_no, vport->vpi);
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
}
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
MAILBOX_t *mb = &pmb->mb;
switch (mb->mbxStatus) {
case 0x0011:
case 0x9601:
case 0x9602:
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = 0;
goto out;
}
phba->vpi_cnt++;
vport->num_disc_nodes = 0;
/* go thru NPR list and issue ELS PLOGIs */
if (vport->fc_npr_cnt)
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
}
vport->port_state = LPFC_VPORT_READY;
out:
mempool_free(pmb, phba->mbox_mem_pool);
return;
}
/*
* This routine handles processing a Fabric REG_LOGIN mailbox
* command upon completion. It is set up in the LPFC_MBOXQ
......@@ -964,10 +1211,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_vport *vport = pmb->vport;
struct lpfc_vport *next_vport;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
struct lpfc_nodelist *ndlp;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
......@@ -979,11 +1227,20 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_nlp_put(ndlp);
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0258 Register Fabric login error: 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
/* Start discovery */
lpfc_disc_start(vport);
return;
}
......@@ -994,47 +1251,25 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* This NPort has been assigned an NPort_ID by the fabric as a
* result of the completed fabric login. Issue a State Change
* Registration (SCR) ELS request to the fabric controller
* (SCR_DID) so that this NPort gets RSCN events from the
* fabric.
*/
lpfc_issue_els_scr(vport, SCR_DID, 0);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
/* Allocate a new node instance. If the pool is empty,
* start the discovery process and skip the Nameserver
* login process. This is attempted again later on.
* Otherwise, issue a Port Login (PLOGI) to
* the NameServer
*/
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp) {
lpfc_disc_start(vport);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
return;
} else {
lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
}
}
list_for_each_entry(next_vport, &phba->port_list, listentry) {
if (next_vport->port_type == LPFC_PHYSICAL_PORT)
continue;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, NameServer_DID, 0);
if (phba->cfg_fdmi_on) {
ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
GFP_KERNEL);
if (ndlp_fdmi) {
lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
ndlp_fdmi->nlp_type |= NLP_FABRIC;
ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_issue_els_plogi(vport, FDMI_DID, 0);
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(next_vport);
else {
if (phba->sli3_options &
LPFC_SLI3_NPIV_ENABLED) {
lpfc_vport_set_state(vport,
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0259 No NPIV Fabric "
"support\n",
phba->brd_no, vport->vpi);
}
}
}
lpfc_do_scr_ns_plogi(phba, vport);
}
lpfc_mbuf_free(phba, mp->virt, mp->phys);
......@@ -1058,20 +1293,28 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
if (mb->mbxStatus) {
out:
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_drop_node(vport, ndlp);
/*
* RegLogin failed, so just use loop map to make discovery
* list
*/
lpfc_disc_list_loopmap(vport);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/*
* RegLogin failed, use loop map to make discovery
* list
*/
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
/* Start discovery */
lpfc_disc_start(vport);
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0260 Register NameServer error: 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
return;
}
......@@ -1083,17 +1326,21 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RNN_ID);
lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RSNN_NN);
lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFT_ID);
lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFF_ID);
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
}
vport->fc_ns_retry = 0;
/* Good status, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT)) {
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
/* Cannot issue NameServer Query, so finish up discovery */
lpfc_disc_start(vport);
goto out;
}
lpfc_nlp_put(ndlp);
......@@ -1127,7 +1374,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* registered the port.
*/
if (ndlp->rport && ndlp->rport->dd_data &&
*(struct lpfc_rport_data **) ndlp->rport->dd_data) {
((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
lpfc_nlp_put(ndlp);
}
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
......@@ -1147,16 +1394,16 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
del_timer_sync(&ndlp->nlp_initiator_tmr);
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
return;
}
......@@ -1164,14 +1411,6 @@ static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
struct lpfc_rport_data *rdata = rport->dd_data;
if (rport->scsi_target_id == -1) {
ndlp->rport = NULL;
rdata->pnode = NULL;
lpfc_nlp_put(ndlp);
put_device(&rport->dev);
}
fc_remote_port_delete(rport);
......@@ -1377,9 +1616,9 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
/* Start Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0247 Start Discovery Timer state x%x "
"%d (%d):0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
phba->brd_no, vport->port_state, tmo,
phba->brd_no, vport->vpi, vport->port_state, tmo,
(unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
......@@ -1409,10 +1648,11 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0248 Cancel Discovery Timer state x%x "
"%d (%d):0248 Cancel Discovery Timer state x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->port_state, vport->fc_flag,
vport->fc_plogi_cnt, vport->fc_adisc_cnt);
phba->brd_no, vport->vpi, vport->port_state,
vport->fc_flag, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
return 0;
}
......@@ -1429,6 +1669,11 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
{
struct lpfc_sli *psli = &phba->sli;
IOCB_t *icmd = &iocb->iocb;
struct lpfc_vport *vport = ndlp->vport;
if (iocb->vport != vport)
return 0;
if (pring->ringno == LPFC_ELS_RING) {
switch (icmd->ulpCommand) {
case CMD_GEN_REQUEST64_CR:
......@@ -1446,7 +1691,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
} else if (pring->ringno == psli->fcp_ring) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
......@@ -1472,6 +1717,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
IOCB_t *icmd;
uint32_t rpi, i;
lpfc_fabric_abort_nport(ndlp);
/*
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
......@@ -1490,8 +1737,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Check to see if iocb matches the nport we are
* looking for
*/
if ((lpfc_check_sli_ndlp
(phba, pring, iocb, ndlp))) {
if ((lpfc_check_sli_ndlp(phba, pring, iocb,
ndlp))) {
/* It matches, so dequeue and call compl
with an error */
list_move_tail(&iocb->list,
......@@ -1505,7 +1752,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del(&iocb->list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
......@@ -1539,11 +1786,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_rpi) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
}
......@@ -1554,6 +1801,50 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
}
}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"%d (%d):1815 Could not issue "
"unreg_did (default rpis)\n",
phba->brd_no, vport->vpi);
mempool_free(mbox, phba->mbox_mem_pool);
}
}
}
/*
* Free resources associated with LPFC_NODELIST entry
* so it can be freed.
......@@ -1568,9 +1859,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0900 Cleanup node for NPort x%x "
"%d (%d):0900 Cleanup node for NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_dequeue_node(vport, ndlp);
......@@ -1587,7 +1878,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
......@@ -1607,9 +1898,12 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
del_timer_sync(&ndlp->nlp_initiator_tmr);
if (!list_empty(&ndlp->els_retry_evt.evt_listp))
list_del_init(&ndlp->els_retry_evt.evt_listp);
if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_unreg_rpi(vport, ndlp);
......@@ -1633,12 +1927,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_node(vport, ndlp);
/*
* We should never get here with a non-NULL ndlp->rport. But
* if we do, drop the reference to the rport. That seems the
* intelligent thing to do.
* We can get here with a non-NULL ndlp->rport because when we
* unregister a rport we don't break the rport/node linkage. So if we
* do, make sure we don't leave any dangling pointers behind.
*/
if (ndlp->rport && !(vport->load_flag & FC_UNLOADING)) {
put_device(&ndlp->rport->dev);
if (ndlp->rport) {
rdata = ndlp->rport->dd_data;
rdata->pnode = NULL;
ndlp->rport = NULL;
......@@ -1709,9 +2002,9 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0929 FIND node DID "
"%d (%d):0929 FIND node DID "
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
return ndlp;
......@@ -1720,8 +2013,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
/* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0932 FIND node did x%x NOT FOUND.\n",
phba->brd_no, did);
"%d (%d):0932 FIND node did x%x NOT FOUND.\n",
phba->brd_no, vport->vpi, did);
return NULL;
}
......@@ -1835,6 +2128,14 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
int rc;
/*
* If it's not a physical port or if we have already sent
* clear_la, then don't send it.
*/
if ((phba->link_state >= LPFC_CLEAR_LA) ||
(vport->port_type != LPFC_PHYSICAL_PORT))
return;
/* Link up discovery */
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
phba->link_state = LPFC_CLEAR_LA;
......@@ -1849,7 +2150,26 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
vport->port_state = LPFC_VPORT_READY;
phba->link_state = LPFC_HBA_ERROR;
}
}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *regvpimbox;
regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (regvpimbox) {
lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
regvpimbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, regvpimbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
== MBX_NOT_FINISHED) {
mempool_free(regvpimbox, phba->mbox_mem_pool);
}
}
}
......@@ -1860,7 +2180,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
uint32_t num_sent;
uint32_t clear_la_pending;
int did_changed;
......@@ -1888,21 +2207,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0202 Start Discovery hba state x%x "
"%d (%d):0202 Start Discovery hba state x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->port_state, vport->fc_flag,
vport->fc_plogi_cnt, vport->fc_adisc_cnt);
/* If our did changed, we MUST do PLOGI */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
did_changed) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
}
}
phba->brd_no, vport->vpi, vport->port_state,
vport->fc_flag, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
......@@ -1910,12 +2219,26 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
/*
* For SLI3, cmpl_reg_vpi will set port_state to READY, and
* continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_RSCN_MODE)) {
lpfc_issue_reg_vpi(phba, vport);
return;
}
/*
* For SLI2, we need to set port_state to READY and continue
* discovery.
*/
if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
if (vport->port_type == LPFC_PHYSICAL_PORT) {
/* If we get here, there is nothing to ADISC */
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
} else if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
vport->num_disc_nodes = 0;
/* go thru NPR nodes and issue ELS PLOGIs */
if (vport->fc_npr_cnt)
......@@ -1925,9 +2248,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
}
vport->port_state = LPFC_VPORT_READY;
}
vport->port_state = LPFC_VPORT_READY;
} else {
/* Next do PLOGIs - if any */
num_sent = lpfc_els_disc_plogi(vport);
......@@ -1944,6 +2268,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_MODE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
} else
lpfc_els_handle_rscn(vport);
}
......@@ -1999,7 +2324,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del(&iocb->list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
......@@ -2030,6 +2355,14 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
* NAME: lpfc_disc_timeout
......@@ -2060,8 +2393,10 @@ lpfc_disc_timeout(unsigned long ptr)
vport->work_port_events |= WORKER_DISC_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, flags);
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, flags);
}
return;
}
......@@ -2073,7 +2408,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
if (!(vport->fc_flag & FC_DISC_TMO))
......@@ -2091,8 +2426,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
*/
/* FAN timeout */
lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
"%d:0221 FAN timeout\n",
phba->brd_no);
"%d (%d):0221 FAN timeout\n",
phba->brd_no, vport->vpi);
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
......@@ -2109,17 +2444,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
lpfc_unreg_rpi(vport, ndlp);
}
}
vport->port_state = LPFC_FLOGI;
lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
if (vport->port_state != LPFC_FLOGI) {
vport->port_state = LPFC_FLOGI;
lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
}
break;
case LPFC_FDISC:
case LPFC_FLOGI:
/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0222 Initial FLOGI timeout\n",
phba->brd_no);
"%d (%d):0222 Initial %s timeout\n",
phba->brd_no, vport->vpi,
vport->vpi ? "FLOGI" : "FDISC");
/* Assume no Fabric and go on with discovery.
* Check for outstanding ELS FLOGI to abort.
......@@ -2136,8 +2475,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0223 Timeout while waiting for NameServer "
"login\n", phba->brd_no);
"%d (%d):0223 Timeout while waiting for "
"NameServer login\n",
phba->brd_no, vport->vpi);
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
......@@ -2150,53 +2490,40 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0224 NameServer Query timeout "
"%d (%d):0224 NameServer Query timeout "
"Data: x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT);
if (rc == 0)
break;
}
vport->fc_ns_retry = 0;
}
/* Nothing to authenticate, so CLEAR_LA right now */
clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!clearlambox) {
clrlaerr = 1;
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0226 Device Discovery "
"completion error\n",
phba->brd_no);
phba->link_state = LPFC_HBA_ERROR;
break;
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
vport->fc_ns_retry++;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0);
if (rc == 0)
break;
}
vport->fc_ns_retry = 0;
phba->link_state = LPFC_CLEAR_LA;
lpfc_clear_la(phba, clearlambox);
clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
clearlambox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, clearlambox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mempool_free(clearlambox, phba->mbox_mem_pool);
clrlaerr = 1;
break;
/*
* Discovery is over.
* set port_state to PORT_READY if SLI2.
* cmpl_reg_vpi will set port_state to READY for SLI3.
*/
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
else { /* NPIV Not enabled */
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0206 Device Discovery "
"%d (%d):0206 Device Discovery "
"completion error\n",
phba->brd_no);
phba->brd_no, vport->vpi);
phba->link_state = LPFC_HBA_ERROR;
break;
}
......@@ -2206,6 +2533,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
phba->cfg_link_speed);
initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
initlinkmbox->vport = vport;
initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
lpfc_set_loopback_flag(phba);
......@@ -2217,37 +2545,28 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0227 Node Authentication timeout\n",
phba->brd_no);
"%d (%d):0227 Node Authentication timeout\n",
phba->brd_no, vport->vpi);
lpfc_disc_flush_list(vport);
clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!clearlambox) {
clrlaerr = 1;
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0207 Device Discovery "
"completion error\n",
phba->brd_no);
phba->link_state = LPFC_HBA_ERROR;
break;
}
phba->link_state = LPFC_CLEAR_LA;
lpfc_clear_la(phba, clearlambox);
clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
clearlambox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, clearlambox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mempool_free(clearlambox, phba->mbox_mem_pool);
clrlaerr = 1;
/*
* set port_state to PORT_READY if SLI2.
* cmpl_reg_vpi will set port_state to READY for SLI3.
*/
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
else { /* NPIV Not enabled */
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
break;
case LPFC_VPORT_READY:
if (vport->fc_flag & FC_RSCN_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0231 RSCN timeout Data: x%x x%x\n",
phba->brd_no,
"%d (%d):0231 RSCN timeout Data: x%x "
"x%x\n",
phba->brd_no, vport->vpi,
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
/* Cleanup any outstanding ELS commands */
......@@ -2258,23 +2577,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
}
break;
case LPFC_STATE_UNKNOWN:
case LPFC_NS_REG:
case LPFC_BUILD_DISC_LIST:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0229 Unexpected discovery timeout, vport "
"State x%x\n",
vport->port_state, phba->brd_no);
"%d (%d):0229 Unexpected discovery timeout, "
"vport State x%x\n",
phba->brd_no, vport->vpi, vport->port_state);
break;
}
switch (phba->link_state) {
case LPFC_CLEAR_LA:
/* CLEAR LA timeout */
/* CLEAR LA timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0228 CLEAR LA timeout\n",
phba->brd_no);
"%d (%d):0228 CLEAR LA timeout\n",
phba->brd_no, vport->vpi);
clrlaerr = 1;
break;
......@@ -2286,11 +2603,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_LINK_UP:
case LPFC_HBA_ERROR:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0230 Unexpected timeout, hba link "
"%d (%d):0230 Unexpected timeout, hba link "
"state x%x\n",
phba->brd_no, phba->link_state);
phba->brd_no, vport->vpi, phba->link_state);
clrlaerr = 1;
break;
case LPFC_HBA_READY:
break;
}
if (clrlaerr) {
......@@ -2374,7 +2694,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
/*
* Search node lists for a remote port matching filter criteria
* This routine is used when the caller does NOT have host_lock.
* Caller needs to hold host_lock before calling this routine.
*/
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
......@@ -2426,12 +2746,42 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
return NULL;
}
void
lpfc_dev_loss_delay(unsigned long ptr)
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
unsigned long flags;
evtp = &ndlp->dev_loss_evt;
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
evtp->evt_arg1 = ndlp;
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_initiator_tmr);
ndlp->nlp_initiator_tmr.function = lpfc_dev_loss_delay;
ndlp->nlp_initiator_tmr.data = (unsigned long)ndlp;
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
......
......@@ -64,6 +64,7 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
/* Common Transport structures and definitions */
union CtRevisionId {
......@@ -84,6 +85,9 @@ union CtCommandResponse {
uint32_t word;
};
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_TARGET 0x1
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
......@@ -126,20 +130,6 @@ struct lpfc_sli_ct_request {
uint32_t rsvd[7];
} rft;
struct rff {
uint32_t PortId;
uint8_t reserved[2];
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t feature_res:6;
uint8_t feature_init:1;
uint8_t feature_tgt:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint8_t feature_tgt:1;
uint8_t feature_init:1;
uint8_t feature_res:6;
#endif
uint8_t type_code; /* type=8 for FCP */
} rff;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
uint8_t wwnn[8];
......@@ -149,15 +139,42 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rsnn;
struct rspn { /* For RSPN_ID requests */
uint32_t PortId;
uint8_t len;
uint8_t symbname[255];
} rspn;
struct gff {
uint32_t PortId;
} gff;
struct gff_acc {
uint8_t fbits[128];
} gff_acc;
#define FCP_TYPE_FEATURE_OFFSET 4
struct rff {
uint32_t PortId;
uint8_t reserved[2];
uint8_t fbits;
uint8_t type_code; /* type=8 for FCP */
} rff;
} un;
};
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gff))
#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rft))
#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rff))
#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rnn))
#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rsnn))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn))
/*
* FsType Definitions
......@@ -232,6 +249,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_GFT_ID 0x0117
#define SLI_CTNS_GSPN_ID 0x0118
#define SLI_CTNS_GPT_ID 0x011A
#define SLI_CTNS_GFF_ID 0x011F
#define SLI_CTNS_GID_PN 0x0121
#define SLI_CTNS_GID_NN 0x0131
#define SLI_CTNS_GIP_NN 0x0135
......@@ -245,9 +263,9 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RNN_ID 0x0213
#define SLI_CTNS_RCS_ID 0x0214
#define SLI_CTNS_RFT_ID 0x0217
#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RSPN_ID 0x0218
#define SLI_CTNS_RPT_ID 0x021A
#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
......@@ -316,8 +334,9 @@ struct csp {
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
uint16_t response_multiple_Nport:1; /* FC Word 1, bit 29 */
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
......@@ -336,9 +355,9 @@ struct csp {
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
......@@ -1268,6 +1287,10 @@ typedef struct { /* FireFly BIU registers */
#define MBX_READ_RPI64 0x8F
#define MBX_REG_LOGIN64 0x93
#define MBX_READ_LA64 0x95
#define MBX_REG_VPI 0x96
#define MBX_UNREG_VPI 0x97
#define MBX_REG_VNPID 0x96
#define MBX_UNREG_VNPID 0x97
#define MBX_FLASH_WR_ULA 0x98
#define MBX_SET_DEBUG 0x99
......@@ -1570,7 +1593,7 @@ typedef struct {
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
......@@ -2086,6 +2109,45 @@ typedef struct {
#endif
} UNREG_LOGIN_VAR;
/* Structure for MB Command REG_VPI (0x96) */
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1;
uint32_t rsvd2:8;
uint32_t sid:24;
uint32_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
uint16_t rsvd6;
uint16_t vpi;
#else /* __LITTLE_ENDIAN */
uint32_t rsvd1;
uint32_t sid:24;
uint32_t rsvd2:8;
uint32_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
uint16_t vpi;
uint16_t rsvd6;
#endif
} REG_VPI_VAR;
/* Structure for MB Command UNREG_VPI (0x97) */
typedef struct {
uint32_t rsvd1;
uint32_t rsvd2;
uint32_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t rsvd6;
uint16_t vpi;
#else /* __LITTLE_ENDIAN */
uint16_t vpi;
uint16_t rsvd6;
#endif
} UNREG_VPI_VAR;
/* Structure for MB Command UNREG_D_ID (0x23) */
typedef struct {
......@@ -2549,8 +2611,8 @@ typedef union {
LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
......@@ -2575,6 +2637,8 @@ typedef union {
*/
struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
} MAILVARIANTS;
/*
......@@ -2614,7 +2678,6 @@ typedef union {
struct sli3_pgp s3_pgp;
} SLI_VAR;
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t mbxStatus;
......@@ -2935,6 +2998,8 @@ struct rcv_sli3 {
struct ulp_bde64 bde2;
};
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
......@@ -3011,6 +3076,7 @@ typedef struct _IOCB { /* IOCB structure */
uint32_t ulpXS:1;
uint32_t ulpTimeout:8;
#endif
union {
struct rcv_sli3 rcvsli3; /* words 8 - 15 */
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
......@@ -3024,6 +3090,7 @@ typedef struct _IOCB { /* IOCB structure */
#define PARM_UNUSED 0 /* PU field (Word 4) not used */
#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
#define PARM_NPIV_DID 3
#define CLASS1 0 /* Class 1 */
#define CLASS2 1 /* Class 2 */
#define CLASS3 2 /* Class 3 */
......@@ -3044,7 +3111,7 @@ typedef struct _IOCB { /* IOCB structure */
#define IOSTAT_RSVD2 0xC
#define IOSTAT_RSVD3 0xD
#define IOSTAT_RSVD4 0xE
#define IOSTAT_RSVD5 0xF
#define IOSTAT_NEED_BUFFER 0xF
#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
#define IOSTAT_CNT 0x11
......
......@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
......@@ -40,21 +41,18 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
int lpfc_sli_mode = 0;
module_param(lpfc_sli_mode, int, 0);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
/************************************************************************/
......@@ -123,6 +121,8 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
sizeof(phba->wwpn));
}
phba->sli3_options = 0x0;
/* Setup and issue mailbox READ REV command */
lpfc_read_rev(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
......@@ -136,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
return -ERESTART;
}
/*
* The value of rr must be 1 since the driver set the cv field to 1.
* This setting requires the FW to set all revision fields.
......@@ -155,6 +156,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
/* Save information as VPD data */
vp->rev.rBit = 1;
memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
......@@ -170,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
/* If the sli feature level is less than 9, we must
* tear down all RPIs and VPIs on link down if NPIV
* is enabled.
*/
if (vp->rev.feaLevelHigh < 9)
phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
if (lpfc_is_LC_HBA(phba->pcidev->device))
memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
sizeof (phba->RandomData));
......@@ -197,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
mb->un.varDmp.word_cnt);
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
......@@ -240,7 +249,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mb = &pmb->mb;
/* Get login parameters for NID. */
lpfc_read_sparam(phba, pmb);
lpfc_read_sparam(phba, pmb, 0);
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
......@@ -431,10 +440,9 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Cleanup potential discovery resources */
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
list_for_each_entry(vport, &phba->port_list, listentry) {
lpfc_cleanup_discovery_resources(vport);
}
return 0;
}
......@@ -456,13 +464,17 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
struct lpfc_dmabuf *mp, *next_mp;
int i;
/* Cleanup preposted buffers on the ELS ring */
pring = &psli->ring[LPFC_ELS_RING];
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
list_del(&mp->list);
pring->postbufq_cnt--;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
lpfc_sli_hbqbuf_free_all(phba);
else {
/* Cleanup preposted buffers on the ELS ring */
pring = &psli->ring[LPFC_ELS_RING];
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
list_del(&mp->list);
pring->postbufq_cnt--;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
}
for (i = 0; i < psli->num_rings; i++) {
......@@ -485,10 +497,11 @@ void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_vport *port_iterator;
uint32_t event_data;
struct Scsi_Host *shost;
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
......@@ -503,10 +516,17 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
"Data: x%x x%x x%x\n",
phba->brd_no, phba->work_hs,
phba->work_status[0], phba->work_status[1]);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_ESTABLISH_LINK;
list_for_each_entry(port_iterator, &phba->port_list,
listentry) {
shost = lpfc_shost_from_vport(port_iterator);
spin_lock_irq(shost->host_lock);
port_iterator->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
}
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(shost->host_lock);
spin_unlock_irq(&phba->hbalock);
/*
* Firmware stops when it triggers erratt with HS_FFER6.
......@@ -543,11 +563,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
phba->work_status[0], phba->work_status[1]);
event_data = FC_REG_DUMP_EVENT;
shost = lpfc_shost_from_vport(vport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(event_data), (char *) &event_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_unblock_mgmt_io(phba);
......@@ -569,6 +592,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_vport *port_iterator;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
struct lpfc_dmabuf *mp;
......@@ -589,7 +613,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
rc = -EIO;
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
list_for_each_entry(port_iterator, &phba->port_list, listentry)
lpfc_els_flush_cmd(port_iterator);
psli->slistat.link_event++;
lpfc_read_la(phba, pmb, mp);
......@@ -1023,9 +1048,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
return cnt;
}
lpfc_sli_ringpostbuf_put(phba, pring, mp1);
if (mp2) {
if (mp2)
lpfc_sli_ringpostbuf_put(phba, pring, mp2);
}
}
pring->missbufcnt = 0;
return 0;
......@@ -1175,34 +1199,45 @@ lpfc_cleanup(struct lpfc_vport *vport)
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long iflag;
/* Re-establishing Link, timer expired */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1300 Re-establishing Link, timer expired "
"Data: x%x x%x\n",
phba->brd_no, vport->fc_flag,
vport->port_state);
spin_lock_irqsave(shost->host_lock, iflag);
vport->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
spin_lock_irqsave(shost->host_lock, iflag);
vport->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
}
}
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
lpfc_can_disctmo(vport);
return;
}
static void
lpfc_stop_timer(struct lpfc_hba *phba)
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_vport *vport;
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
del_timer_sync(&vport->fc_disctmo);
list_for_each_entry(vport, &phba->port_list, listentry)
lpfc_stop_vport_timers(vport);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
return;
}
......@@ -1210,7 +1245,6 @@ int
lpfc_online(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (!phba)
return 0;
......@@ -1234,9 +1268,14 @@ lpfc_online(struct lpfc_hba *phba)
return 1;
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
lpfc_unblock_mgmt_io(phba);
return 0;
......@@ -1288,31 +1327,37 @@ lpfc_offline(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long iflag;
struct lpfc_vport *port_iterator;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
/* stop all timers associated with this hba */
lpfc_stop_timer(phba);
lpfc_stop_phba_timers(phba);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
port_iterator->work_port_events = 0;
}
lpfc_printf_log(phba,
KERN_WARNING,
LOG_INIT,
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0460 Bring Adapter offline\n",
phba->brd_no);
/* Bring down the SLI Layer and cleanup. The HBA is offline
now. */
lpfc_sli_hba_down(phba);
lpfc_cleanup(vport);
spin_lock_irqsave(shost->host_lock, iflag);
spin_lock(&phba->hbalock);
spin_lock_irq(&phba->hbalock);
phba->work_ha = 0;
vport->work_port_events = 0;
vport->fc_flag |= FC_OFFLINE_MODE;
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(shost->host_lock, iflag);
spin_unlock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
shost = lpfc_shost_from_vport(port_iterator);
lpfc_cleanup(port_iterator);
spin_lock_irq(shost->host_lock);
vport->work_port_events = 0;
vport->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
}
}
/******************************************************************************
......@@ -1332,7 +1377,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
list_del(&sb->list);
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
sb->dma_handle);
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
......@@ -1349,8 +1394,9 @@ lpfc_scsi_free(struct lpfc_hba *phba)
return 0;
}
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance)
lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost;
......@@ -1364,6 +1410,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
vport->phba = phba;
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
shost->unique_id = instance;
shost->max_id = LPFC_MAX_TARGET;
......@@ -1376,7 +1423,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
* max xri value determined in hba setup.
*/
shost->can_queue = phba->cfg_hba_queue_depth - 10;
shost->transportt = lpfc_transport_template;
if (fc_vport != NULL) {
shost->transportt = lpfc_vport_transport_template;
vport->port_type = LPFC_NPIV_PORT;
} else {
shost->transportt = lpfc_transport_template;
vport->port_type = LPFC_PHYSICAL_PORT;
}
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
......@@ -1384,22 +1437,28 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
init_timer(&vport->fc_disctmo);
vport->fc_disctmo.function = lpfc_disc_timeout;
vport->fc_disctmo.data = (unsigned long) vport;
vport->fc_disctmo.data = (unsigned long)vport;
init_timer(&vport->fc_fdmitmo);
vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
vport->fc_fdmitmo.data = (unsigned long) vport;
vport->fc_fdmitmo.data = (unsigned long)vport;
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long) vport;
vport->els_tmofunc.data = (unsigned long)vport;
error = scsi_add_host(shost, &phba->pcidev->dev);
if (fc_vport != NULL) {
error = scsi_add_host(shost, &fc_vport->dev);
} else {
error = scsi_add_host(shost, &phba->pcidev->dev);
}
if (error)
goto out_put_shost;
if (!shost->shost_classdev.kobj.dentry)
goto out_put_shost;
list_add_tail(&vport->listentry, &phba->port_list);
scsi_scan_host(shost);
return vport;
out_put_shost:
......@@ -1411,19 +1470,40 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
void
destroy_port(struct lpfc_vport *vport)
{
lpfc_cleanup(vport);
list_del(&vport->listentry);
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
fc_remove_host(lpfc_shost_from_vport(vport));
scsi_remove_host(lpfc_shost_from_vport(vport));
fc_remove_host(shost);
scsi_remove_host(shost);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
lpfc_cleanup(vport);
return;
}
int
lpfc_get_instance(void)
{
int instance = 0;
/* Assign an unused number */
if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
return -1;
if (idr_get_new(&lpfc_hba_index, NULL, &instance))
return -1;
return instance;
}
static void
lpfc_remove_device(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
lpfc_free_sysfs_attr(vport);
......@@ -1433,8 +1513,6 @@ lpfc_remove_device(struct lpfc_vport *vport)
fc_remove_host(shost);
scsi_remove_host(shost);
kthread_stop(phba->worker_thread);
}
void lpfc_scan_start(struct Scsi_Host *shost)
......@@ -1442,7 +1520,7 @@ void lpfc_scan_start(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (lpfc_alloc_sysfs_attr(vport))
if (lpfc_sli_hba_setup(phba))
goto error;
/*
......@@ -1486,6 +1564,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 0;
finished:
lpfc_host_attrib_init(shost);
return 1;
}
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
/*
* Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
*/
......@@ -1499,7 +1585,8 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
fc_host_supported_fc4s(shost)[2] = 1;
fc_host_supported_fc4s(shost)[7] = 1;
lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
if (phba->lmt & LMT_10Gb)
......@@ -1521,11 +1608,10 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
fc_host_active_fc4s(shost)[2] = 1;
fc_host_active_fc4s(shost)[7] = 1;
fc_host_max_npiv_vports(shost) = phba->max_vpi;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LOADING;
spin_unlock_irq(shost->host_lock);
return 1;
}
static int __devinit
......@@ -1555,20 +1641,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pcidev = pdev;
/* Assign an unused board number */
if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
goto out_free_phba;
error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
if (error)
if ((phba->brd_no = lpfc_get_instance()) < 0)
goto out_free_phba;
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->hbq_buffer_list);
/*
* Get all the module params for configuring this host and then
* establish the host.
*/
lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
/* Initialize timers used by driver */
init_timer(&phba->fc_estabtmo);
......@@ -1581,6 +1664,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
init_timer(&phba->fcp_poll_timer);
phba->fcp_poll_timer.function = lpfc_poll_timeout;
phba->fcp_poll_timer.data = (unsigned long) phba;
init_timer(&phba->fabric_block_timer);
phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
phba->fabric_block_timer.data = (unsigned long) phba;
pci_set_master(pdev);
retval = pci_set_mwi(pdev);
......@@ -1696,15 +1782,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
spin_lock_init(&phba->scsi_buf_list_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
vport = lpfc_create_port(phba, phba->brd_no);
/* Initialize list of fabric iocbs */
INIT_LIST_HEAD(&phba->fabric_iocb_list);
vport = lpfc_create_port(phba, phba->brd_no, NULL);
if (!vport)
goto out_kthread_stop;
shost = lpfc_shost_from_vport(vport);
vport->port_type = LPFC_PHYSICAL_PORT;
phba->pport = vport;
pci_set_drvdata(pdev, lpfc_shost_from_vport(vport));
pci_set_drvdata(pdev, shost);
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
......@@ -1720,7 +1808,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0451 Enable interrupt handler failed\n",
phba->brd_no);
goto out_destroy_port;
goto out_disable_msi;
}
phba->MBslimaddr = phba->slim_memmap_p;
......@@ -1729,10 +1817,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
error = lpfc_sli_hba_setup(phba);
if (error)
if (lpfc_alloc_sysfs_attr(vport))
goto out_free_irq;
scsi_scan_host(shost);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(shost->host_lock);
lpfc_poll_start_timer(phba);
......@@ -1742,11 +1830,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
return 0;
out_free_irq:
lpfc_stop_timer(phba);
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
free_irq(phba->pcidev->irq, phba);
out_disable_msi:
pci_disable_msi(phba->pcidev);
out_destroy_port:
destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
......@@ -1786,9 +1874,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
vport->load_flag |= FC_UNLOADING;
lpfc_remove_device(vport);
struct lpfc_vport *port_iterator;
list_for_each_entry(port_iterator, &phba->port_list, listentry)
port_iterator->load_flag |= FC_UNLOADING;
/*
* Bring down the SLI Layer. This step disable all interrupts,
......@@ -1798,7 +1886,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_sli_hba_down(phba);
lpfc_sli_brdrestart(phba);
lpfc_stop_timer(phba);
lpfc_stop_phba_timers(phba);
kthread_stop(phba->worker_thread);
......@@ -1806,7 +1894,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
vport->work_port_events = 0;
destroy_port(vport);
pci_set_drvdata(pdev, NULL);
......@@ -1892,13 +1979,14 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Re-establishing Link */
spin_lock_irq(&phba->hbalock);
phba->pport->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(&phba->hbalock);
spin_lock_irq(host->host_lock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
phba->pport->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(host->host_lock);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/* Take device offline; this will perform cleanup */
lpfc_offline(phba);
......@@ -2020,11 +2108,15 @@ lpfc_init(void)
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
if (!lpfc_transport_template)
lpfc_vport_transport_template =
fc_attach_transport(&lpfc_vport_transport_functions);
if (!lpfc_transport_template || !lpfc_vport_transport_template)
return -ENOMEM;
error = pci_register_driver(&lpfc_driver);
if (error)
if (error) {
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
}
return error;
}
......@@ -2034,6 +2126,7 @@ lpfc_exit(void)
{
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
}
module_init(lpfc_init);
......
......@@ -30,6 +30,7 @@
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */
#define LOG_VPORT 0x4000 /* NPIV events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
......
......@@ -106,7 +106,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
*/
pmb->context1 = (uint8_t *) mp;
mb->mbxOwner = OWN_HOST;
return 0;
return (0);
}
/**********************************************/
......@@ -209,7 +209,7 @@ lpfc_init_link(struct lpfc_hba * phba,
*/
vpd = &phba->vpd;
if (vpd->rev.feaLevelHigh >= 0x02){
switch (linkspeed){
switch(linkspeed){
case LINK_SPEED_1G:
case LINK_SPEED_2G:
case LINK_SPEED_4G:
......@@ -232,7 +232,6 @@ lpfc_init_link(struct lpfc_hba * phba,
mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
mb->mbxOwner = OWN_HOST;
mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
mb->un.varInitLnk.link_flags |= FLAGS_UNREG_LOGIN_ALL;
return;
}
......@@ -241,7 +240,7 @@ lpfc_init_link(struct lpfc_hba * phba,
/* mailbox command */
/**********************************************/
int
lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
......@@ -265,18 +264,19 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
LOG_MBOX,
"%d:0301 READ_SPARAM: no buffers\n",
phba->brd_no);
return 1;
return (1);
}
INIT_LIST_HEAD(&mp->list);
mb->mbxCommand = MBX_READ_SPARM64;
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
mb->un.varRdSparm.vpi = vpi;
/* save address for completion */
pmb->context1 = mp;
return 0;
return (0);
}
/********************************************/
......@@ -284,7 +284,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
void
lpfc_unreg_did(struct lpfc_hba *phba, uint32_t did, LPFC_MBOXQ_t *pmb)
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
......@@ -292,6 +293,7 @@ lpfc_unreg_did(struct lpfc_hba *phba, uint32_t did, LPFC_MBOXQ_t *pmb)
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
......@@ -337,8 +339,8 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
int
lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
LPFC_MBOXQ_t *pmb, uint32_t flag)
lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
MAILBOX_t *mb = &pmb->mb;
uint8_t *sparam;
......@@ -347,6 +349,7 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
mb->un.varRegLogin.vpi = vpi;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
......@@ -358,13 +361,11 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
kfree(mp);
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba,
KERN_WARNING,
LOG_MBOX,
"%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
phba->brd_no,
(uint32_t) did, (uint32_t) flag);
return 1;
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
"flag x%x\n",
phba->brd_no, vpi, did, flag);
return (1);
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
......@@ -380,7 +381,7 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
return 0;
return (0);
}
/**********************************************/
......@@ -388,7 +389,8 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
/* mailbox command */
/**********************************************/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
......@@ -397,12 +399,52 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
mb->un.varUnregLogin.vpi = vpi;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
return;
}
/**************************************************/
/* lpfc_reg_vpi Issue a REG_VPI */
/* mailbox command */
/**************************************************/
void
lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegVpi.vpi = vpi;
mb->un.varRegVpi.sid = sid;
mb->mbxCommand = MBX_REG_VPI;
mb->mbxOwner = OWN_HOST;
return;
}
/**************************************************/
/* lpfc_unreg_vpi Issue a UNREG_VPI */
/* mailbox command */
/**************************************************/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregVpi.vpi = vpi;
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
return;
}
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
......@@ -420,9 +462,9 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
pring = &psli->ring[i];
pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
SLI2_IOCB_CMD_SIZE;
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
SLI2_IOCB_RSP_SIZE;
SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
......@@ -437,18 +479,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
pring->cmdringaddr = (void *)&phba->slim2p->IOCBs[iocbCnt];
pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p;
offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
pring->rspringaddr = (void *)&phba->slim2p->IOCBs[iocbCnt];
pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
......@@ -519,7 +561,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
* Notification */
hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
* # in words 0-19 */
hbqmb->profile = hbq_desc->profile; /* Selection profile:
hbqmb->profile = hbq_desc->profile; /* Selection profile:
* 0 = all,
* 7 = logentry */
hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
......@@ -538,9 +580,9 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
mb->mbxCommand = MBX_CONFIG_HBQ;
mb->mbxOwner = OWN_HOST;
/* Copy info for profiles 2,3,5. Other
* profiles this area is reserved
*/
/* Copy info for profiles 2,3,5. Other
* profiles this area is reserved
*/
if (hbq_desc->profile == 2)
lpfc_build_hbq_profile2(hbqmb, hbq_desc);
else if (hbq_desc->profile == 3)
......@@ -563,6 +605,8 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
return;
}
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
......@@ -605,7 +649,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
}
void
lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
MAILBOX_t *mb = &pmb->mb;
......@@ -629,11 +673,19 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* If HBA supports SLI=3 ask for it */
mb->un.varCfgPort.sli_mode = phba->sli_rev;
if (phba->sli_rev == 3) {
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
}
if (phba->max_vpi && lpfc_npiv_enable &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.cmv = 1;
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
phba->sli_rev = 2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
......@@ -748,7 +800,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* Swap PCB if needed */
lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
sizeof (PCB_t));
sizeof(PCB_t));
}
void
......@@ -783,13 +835,22 @@ lpfc_mbox_get(struct lpfc_hba * phba)
struct lpfc_sli *psli = &phba->sli;
list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
if (mbq) {
if (mbq)
psli->mboxq_cnt--;
}
return mbq;
}
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
/* This function expects to be called from interrupt context */
spin_lock(&phba->hbalock);
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
spin_unlock(&phba->hbalock);
return;
}
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
......
......@@ -44,6 +44,7 @@ int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int longs;
int i;
phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
......@@ -87,8 +88,15 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
if (!phba->lpfc_hbq_pool)
goto fail_free_nlp_mem_pool;
longs = (phba->max_vpi + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
if (!phba->vpi_bmask)
goto fail_free_hbq_pool;
return 0;
fail_free_hbq_pool:
lpfc_sli_hbqbuf_free_all(phba);
fail_free_nlp_mem_pool:
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
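The bitmask allocation above sizes the VPI bitmap by rounding max_vpi bits up to a whole number of unsigned longs. A minimal standalone sketch of that ceiling division, with an illustrative max_vpi value rather than the adapter's real limit:
#include <stdio.h>
#include <limits.h>
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
int main(void)
{
	unsigned long max_vpi = 100;	/* illustrative value only */
	/* ceiling division: enough longs to hold max_vpi bits */
	unsigned long longs = (max_vpi + BITS_PER_LONG - 1) / BITS_PER_LONG;
	/* on a 64-bit build: (100 + 63) / 64 = 2 longs, i.e. 16 bytes */
	printf("%lu longs, %lu bytes\n", longs,
	       longs * (unsigned long) sizeof(unsigned long));
	return 0;
}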
......@@ -119,9 +127,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
struct lpfc_dmabuf *mp;
int i;
kfree(phba->vpi_bmask);
lpfc_sli_hbqbuf_free_all(phba);
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
......@@ -131,9 +139,17 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
if (psli->mbox_active) {
mbox = psli->mbox_active;
mp = (struct lpfc_dmabuf *) (mbox->context1);
......@@ -163,7 +179,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
phba->lpfc_scsi_dma_buf_pool = NULL;
phba->lpfc_mbuf_pool = NULL;
/* Free the iocb lookup array */
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
......@@ -179,7 +195,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
spin_lock_irqsave(&phba->hbalock, iflags);
if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
pool->current_count--;
ret = pool->elements[pool->current_count].virt;
*handle = pool->elements[pool->current_count].phys;
......@@ -214,7 +230,6 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
void *
lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
......@@ -230,3 +245,24 @@ lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
return;
}
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
struct hbq_dmabuf *hbq_entry;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
if (hbq_entry->tag == -1) {
lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
hbq_entry->dbuf.phys);
kfree(hbq_entry);
} else {
lpfc_sli_free_hbq(phba, hbq_entry);
}
} else {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
return;
}
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
......@@ -35,6 +35,7 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
/* Called to verify a rcv'ed ADISC was intended for us. */
......@@ -74,12 +75,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hsp->cls1.rcvDataSizeLsb;
ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
}
} else if (class == CLASS1) {
return 0;
goto bad_service_param;
}
if (sp->cls2.classValid) {
......@@ -87,12 +90,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hsp->cls2.rcvDataSizeLsb;
ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
}
} else if (class == CLASS2) {
return 0;
goto bad_service_param;
}
if (sp->cls3.classValid) {
......@@ -100,12 +105,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hsp->cls3.rcvDataSizeLsb;
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
}
} else if (class == CLASS3) {
return 0;
goto bad_service_param;
}
/*
......@@ -124,11 +131,22 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
bad_service_param:
lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0207 Device %x "
"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
"invalid service parameters. Ignoring device.\n",
vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
return 0;
}
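Each class check above follows the same pattern: combine the MSB/LSB bytes of the receive data size, jump to bad_service_param when the result is zero, and clamp the remote's value down to the host's limit when it is larger. A standalone sketch of that pattern with made-up names and values:
#include <stdint.h>
#include <stdio.h>
/* hypothetical helper mirroring the (msb << 8) | lsb combination and clamp */
static int clamp_rcv_size(uint8_t *msb, uint8_t *lsb, uint16_t host_limit)
{
	uint16_t requested = (uint16_t) (*msb << 8) | *lsb;
	if (!requested)			/* zero size: invalid service parameter */
		return 0;
	if (requested > host_limit) {	/* clamp to what the host accepts */
		*msb = host_limit >> 8;
		*lsb = host_limit & 0xff;
	}
	return 1;
}
int main(void)
{
	uint8_t msb = 0x08, lsb = 0x00;		/* remote advertises 2048 */
	if (clamp_rcv_size(&msb, &lsb, 0x0400))	/* host limit is 1024 */
		printf("accepted, size now %u\n", (unsigned) ((msb << 8) | lsb));
	return 0;
}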
static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
struct lpfc_iocbq *rspiocb)
{
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
......@@ -176,10 +194,12 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0205 Abort outstanding I/O on NPort x%x "
"%d (%d):0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_fabric_abort_nport(ndlp);
/* First check the txq */
spin_lock_irq(&phba->hbalock);
......@@ -198,15 +218,16 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
/* Check to see if iocb matches the nport we are looking
for */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del(&iocb->list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
......@@ -225,7 +246,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
struct lpfc_iocbq *cmdiocb)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
......@@ -244,7 +265,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* the FLOGI and resend it first.
*/
if (vport->fc_flag & FC_PT2PT) {
lpfc_els_abort_flogi(phba);
lpfc_els_abort_flogi(phba);
if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* If the other side is supposed to initiate
* the PLOGI anyway, just ACC it now and
......@@ -279,8 +300,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* PLOGI chkparm OK */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
phba->brd_no,
"%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
......@@ -314,8 +335,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
if ((vport->fc_flag & FC_PT2PT)
&& !(vport->fc_flag & FC_PT2PT_PLOGI)) {
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
vport->fc_myDID = icmd->un.rcvels.parmRo;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
......@@ -327,7 +348,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mempool_free( mbox, phba->mbox_mem_pool);
mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
......@@ -337,8 +358,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!mbox)
goto out;
rc = lpfc_reg_login(phba, icmd->un.rcvels.remoteID, (uint8_t *) sp,
mbox, 0);
rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *) sp, mbox, 0);
if (rc) {
mempool_free(mbox, phba->mbox_mem_pool);
goto out;
......@@ -415,7 +436,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
NULL, 0);
NULL, 0);
}
return 1;
}
......@@ -457,7 +478,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
......@@ -499,8 +520,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->targetFunc)
......@@ -526,15 +546,16 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
/* Check config parameter use-adisc or FCP-2 */
if (phba->cfg_use_adisc == 0 &&
(vport->fc_flag & FC_RSCN_MODE) == 0 &&
(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) == 0)
return 0;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
return 1;
if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
return 1;
}
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
static uint32_t
......@@ -542,9 +563,9 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
"%d:0253 Illegal State Transition: node x%x event x%x, "
"state x%x Data: x%x x%x\n",
vport->phba->brd_no,
"%d (%d):0253 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
return ndlp->nlp_state;
......@@ -629,7 +650,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
phba->fc_stat.elsLogiCol++;
port_cmp = memcmp(&vport->fc_portname, &sp->portName,
sizeof (struct lpfc_name));
sizeof(struct lpfc_name));
if (port_cmp >= 0) {
/* Reject this request because the remote node will accept
......@@ -644,13 +665,27 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
memset(&stat, 0, sizeof (struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding PLOGI */
/* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
......@@ -724,9 +759,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
/* PLOGI chkparm OK */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d:0121 PLOGI chkparm OK "
"%d (%d):0121 PLOGI chkparm OK "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
......@@ -748,13 +783,20 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0133 PLOGI: no memory for reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
}
lpfc_unreg_rpi(vport, ndlp);
if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
mbox, 0) == 0) {
if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
......@@ -775,16 +817,37 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *)mbox->context1;
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
} else {
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0135 PLOGI: cannot format reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
}
out:
out:
if (ndlp->nlp_DID == NameServer_DID) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0261 Cannot Register NameServer login\n",
phba->brd_no, vport->vpi);
}
/* Free this node since the driver cannot login or has the wrong
sparm */
lpfc_drop_node(vport, ndlp);
......@@ -820,12 +883,18 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
spin_lock_irq(shost->host_lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
......@@ -924,7 +993,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
irsp = &rspiocb->iocb;
if ((irsp->ulpStatus) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
spin_lock_irq(shost->host_lock);
......@@ -980,6 +1049,12 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp);
......@@ -987,9 +1062,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
......@@ -1035,6 +1109,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
if ((mb = phba->sli.mbox_active)) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
......@@ -1049,6 +1124,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
lpfc_nlp_put(ndlp);
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
}
......@@ -1099,8 +1175,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d:0246 RegLogin failed Data: x%x x%x x%x\n",
phba->brd_no,
"%d (%d):0246 RegLogin failed Data: x%x x%x "
"x%x\n",
phba->brd_no, vport->vpi,
did, mb->mbxStatus, vport->port_state);
/*
......@@ -1167,11 +1244,18 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
......@@ -1239,6 +1323,7 @@ static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
......@@ -1267,29 +1352,45 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
phba->cfg_vport_restrict_login) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
spin_unlock_irq(shost->host_lock);
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
if (ndlp->nlp_type & NLP_FCP_TARGET)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
/*! lpfc_device_rm_prli_issue
*
* \pre
* \post
* \param phba
* \param ndlp
* \param arg
* \param evt
* \return uint32_t
*
* \b Description:
* This routine is invoked when we receive a request to remove a nport we are in the
* process of PRLIing. We should software abort outstanding prli, unreg
* login, send a logout. We will change node state to UNUSED_NODE, put it
* in plogi state so it can be freed when LOGO completes.
*
*/
*
* \pre
* \post
* \param phba
* \param ndlp
* \param arg
* \param evt
* \return uint32_t
*
* \b Description:
* This routine is invoked when we receive a request to remove a nport we are in the
* process of PRLIing. We should software abort outstanding prli, unreg
* login, send a logout. We will change node state to UNUSED_NODE, put it
* on plogi list so it can be freed when LOGO completes.
*
*/
static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
......@@ -1312,21 +1413,21 @@ lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/*! lpfc_device_recov_prli_issue
*
* \pre
* \post
* \param phba
* \param ndlp
* \param arg
* \param evt
* \return uint32_t
*
* \b Description:
* The routine is invoked when the state of a device is unknown, like
* during a link down. We should remove the nodelist entry from the
* unmapped list, issue a UNREG_LOGIN, do a software abort of the
* outstanding PRLI command, then free the node entry.
*/
*
* \pre
* \post
* \param phba
* \param ndlp
* \param arg
* \param evt
* \return uint32_t
*
* \b Description:
* The routine is invoked when the state of a device is unknown, like
* during a link down. We should remove the nodelist entry from the
* unmapped list, issue a UNREG_LOGIN, do a software abort of the
* outstanding PRLI command, then free the node entry.
*/
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
......@@ -1336,6 +1437,12 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
/* software abort outstanding PRLI */
lpfc_els_abort(phba, ndlp);
......@@ -1344,6 +1451,7 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
......@@ -1466,7 +1574,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
......@@ -1573,8 +1681,9 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* here will affect the counting of discovery threads.
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(vport, ndlp, 0);
......@@ -1719,6 +1828,12 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
......@@ -1803,7 +1918,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
......@@ -1915,9 +2030,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0211 DSM in event x%x on NPort x%x in state %d "
"Data: x%x\n",
phba->brd_no,
"%d (%d):0211 DSM in event x%x on NPort x%x in "
"state %d Data: x%x\n",
phba->brd_no, vport->vpi,
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
......@@ -1925,9 +2040,10 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM out state <rc> on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
phba->brd_no,
rc, ndlp->nlp_DID, ndlp->nlp_flag);
"%d (%d):0212 DSM out state %d on NPort x%x "
"Data: x%x\n",
phba->brd_no, vport->vpi,
rc, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
......
......@@ -37,10 +37,159 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
/*
* This function is called with no lock held when there is a resource
* error in driver or in firmware.
*/
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
unsigned long flags;
spin_lock_irqsave(&phba->hbalock, flags);
atomic_inc(&phba->num_rsrc_err);
phba->last_rsrc_error_time = jiffies;
if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
phba->last_ramp_down_time = jiffies;
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
if ((phba->pport->work_port_events &
WORKER_RAMP_DOWN_QUEUE) == 0) {
phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
}
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->work_wait)
wake_up(phba->work_wait);
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
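lpfc_adjust_queue_depth() above only schedules the ramp-down worker when the previous ramp-down is at least QUEUE_RAMP_DOWN_INTERVAL in the past; earlier resource errors are just counted. A standalone sketch of that time gate, using a plain tick counter instead of jiffies and a made-up interval:
#include <stdio.h>
#define RAMP_DOWN_INTERVAL 100UL	/* illustrative ticks, not HZ-based */
static unsigned long last_ramp_down;	/* tick of the last scheduled ramp-down */
static unsigned long num_rsrc_err;	/* errors counted since the handler ran */
/* returns 1 when a ramp-down should be scheduled for this error, else 0 */
static int resource_error(unsigned long now)
{
	num_rsrc_err++;
	if (last_ramp_down + RAMP_DOWN_INTERVAL > now)
		return 0;		/* too soon: only count the error */
	last_ramp_down = now;
	return 1;			/* wake the worker once per interval */
}
int main(void)
{
	unsigned long t;
	int scheduled = 0;
	for (t = 1; t <= 500; t += 10)	/* simulate an error every 10 ticks */
		scheduled += resource_error(t);
	/* 50 errors are counted, but only a few ramp-downs get scheduled */
	printf("%lu errors, %d ramp-downs scheduled\n", num_rsrc_err, scheduled);
	return 0;
}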
/*
* This function is called with no lock held when there is a successful
* SCSI command completion.
*/
static inline void
lpfc_rampup_queue_depth(struct lpfc_hba *phba,
struct scsi_device *sdev)
{
unsigned long flags;
atomic_inc(&phba->num_cmd_success);
if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
return;
spin_lock_irqsave(&phba->hbalock, flags);
if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
phba->last_ramp_up_time = jiffies;
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
if ((phba->pport->work_port_events &
WORKER_RAMP_UP_QUEUE) == 0) {
phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
}
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->work_wait)
wake_up(phba->work_wait);
spin_unlock_irqrestore(&phba->hbalock, flags);
}
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct Scsi_Host *host;
struct scsi_device *sdev;
unsigned long new_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
host = lpfc_shost_from_vport(vport);
if (!scsi_host_get(host))
continue;
spin_unlock_irq(&phba->hbalock);
shost_for_each_device(sdev, host) {
new_queue_depth = sdev->queue_depth * num_rsrc_err /
(num_rsrc_err + num_cmd_success);
if (!new_queue_depth)
new_queue_depth = sdev->queue_depth - 1;
else
new_queue_depth =
sdev->queue_depth - new_queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
new_queue_depth);
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
new_queue_depth);
}
spin_lock_irq(&phba->hbalock);
scsi_host_put(host);
}
spin_unlock_irq(&phba->hbalock);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
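The depth reduction above is proportional to the share of resource errors among all completions seen in the window, and always backs off by at least one. A standalone worked example of that arithmetic with invented counts (the handler only runs after at least one error, so the divisor is nonzero):
#include <stdio.h>
static unsigned long ramp_down(unsigned long queue_depth,
			       unsigned long num_rsrc_err,
			       unsigned long num_cmd_success)
{
	/* cut the depth by its error share; divisor assumed nonzero */
	unsigned long cut = queue_depth * num_rsrc_err /
			    (num_rsrc_err + num_cmd_success);
	if (!cut)			/* always back off by at least one */
		return queue_depth - 1;
	return queue_depth - cut;
}
int main(void)
{
	/* depth 30 with 10 errors vs 90 successes: 30 * 10 / 100 = 3, new depth 27 */
	printf("new depth %lu\n", ramp_down(30, 10, 90));
	/* depth 4 with 1 error vs 99 successes: cut rounds to 0, so drop to 3 */
	printf("new depth %lu\n", ramp_down(4, 1, 99));
	return 0;
}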
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct Scsi_Host *host;
struct scsi_device *sdev;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
host = lpfc_shost_from_vport(vport);
if (!scsi_host_get(host))
continue;
spin_unlock_irq(&phba->hbalock);
shost_for_each_device(sdev, host) {
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
sdev->queue_depth+1);
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
sdev->queue_depth+1);
}
spin_lock_irq(&phba->hbalock);
scsi_host_put(host);
}
spin_unlock_irq(&phba->hbalock);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
/*
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
......@@ -154,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
}
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
......@@ -165,13 +314,16 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
? lpfc_cmd->cur_iocbq.vport->vpi
: 0);
dma_addr_t physaddr;
uint32_t i, num_bde = 0;
int datadir = scsi_cmnd->sc_data_direction;
......@@ -235,9 +387,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
dma_error = dma_mapping_error(physaddr);
if (dma_error) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0718 Unable to dma_map_single "
"request_buffer: x%x\n",
phba->brd_no, dma_error);
"%d (%d):0718 Unable to dma_map_single "
"request_buffer: x%x\n",
phba->brd_no, vpi, dma_error);
return 1;
}
......@@ -299,6 +451,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
struct lpfc_hba *phba = vport->phba;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t vpi = vport->vpi;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
......@@ -331,9 +484,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, cmnd->cmnd[0], scsi_status,
phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
......@@ -354,10 +507,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
cmnd->resid = be32_to_cpu(fcprsp->rspResId);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0716 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
"%d (%d):0716 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
cmnd->resid, fcpi_parm, cmnd->cmnd[0],
cmnd->underflow);
/*
* If there is an under run check if under run reported by
......@@ -368,12 +522,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcpi_parm &&
(cmnd->resid != fcpi_parm)) {
lpfc_printf_log(phba, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
"%d:0735 FCP Read Check Error and Underrun "
"Data: x%x x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl),
cmnd->resid,
fcpi_parm, cmnd->cmnd[0]);
LOG_FCP | LOG_FCP_ERROR,
"%d (%d):0735 FCP Read Check Error "
"and Underrun Data: x%x x%x x%x x%x\n",
phba->brd_no, vpi,
be32_to_cpu(fcpcmd->fcpDl),
cmnd->resid, fcpi_parm, cmnd->cmnd[0]);
cmnd->resid = cmnd->request_bufflen;
host_status = DID_ERROR;
}
......@@ -387,19 +541,20 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_status == SAM_STAT_GOOD) &&
(cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0717 FCP command x%x residual "
"%d (%d):0717 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n", phba->brd_no,
cmnd->cmnd[0], cmnd->request_bufflen,
cmnd->resid, cmnd->underflow);
"Data: x%x x%x x%x\n",
phba->brd_no, vpi, cmnd->cmnd[0],
cmnd->request_bufflen, cmnd->resid,
cmnd->underflow);
host_status = DID_ERROR;
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0720 FCP command x%x residual "
"%d (%d):0720 FCP command x%x residual "
"overrun error. Data: x%x x%x \n",
phba->brd_no, cmnd->cmnd[0],
phba->brd_no, vpi, cmnd->cmnd[0],
cmnd->request_bufflen, cmnd->resid);
host_status = DID_ERROR;
......@@ -410,11 +565,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"%d:0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0]);
"%d (%d):0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n",
phba->brd_no, vpi,
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0]);
host_status = DID_ERROR;
cmnd->resid = cmnd->request_bufflen;
}
......@@ -433,6 +589,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
? lpfc_cmd->cur_iocbq.vport->vpi
: 0);
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
......@@ -448,11 +607,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0729 FCP cmd x%x failed <%d/%d> status: "
"x%x result: x%x Data: x%x x%x\n",
phba->brd_no, cmd->cmnd[0], cmd->device->id,
cmd->device->lun, lpfc_cmd->status,
lpfc_cmd->result, pIocbOut->iocb.ulpContext,
"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
"status: x%x result: x%x Data: x%x x%x\n",
phba->brd_no, vpi, cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
cmd->device ? cmd->device->lun : 0xffff,
lpfc_cmd->status, lpfc_cmd->result,
pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
switch (lpfc_cmd->status) {
......@@ -464,6 +625,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
case IOSTAT_FABRIC_BSY:
cmd->result = ScsiResult(DID_BUS_BUSY, 0);
break;
case IOSTAT_LOCAL_REJECT:
if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
} /* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
break;
......@@ -480,9 +648,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
"SNS x%x x%x Data: x%x x%x\n",
phba->brd_no, cmd->device->id,
"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
phba->brd_no, vpi, cmd->device->id,
cmd->device->lun, cmd, cmd->result,
*lp, *(lp + 3), cmd->retries, cmd->resid);
}
......@@ -497,6 +665,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
if (!result)
lpfc_rampup_queue_depth(phba, sdev);
if (!result && pnode != NULL &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
......@@ -545,8 +717,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (depth) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0711 detected queue full - lun queue depth "
" adjusted to %d.\n", phba->brd_no, depth);
"%d (%d):0711 detected queue full - "
"lun queue depth adjusted to %d.\n",
phba->brd_no, vpi, depth);
}
}
......@@ -733,10 +906,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0702 Issue Target Reset to TGT %d "
"%d (%d):0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
rdata->pnode->nlp_flag);
phba->brd_no, vport->vpi, tgt_id,
rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
......@@ -842,9 +1015,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
}
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
lpfc_adjust_queue_depth(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0707 driver's buffer pool is empty, "
"IO busied\n", phba->brd_no);
"%d (%d):0707 driver's buffer pool is empty, "
"IO busied\n",
phba->brd_no, vport->vpi);
goto out_host_busy;
}
......@@ -865,7 +1041,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
goto out_host_busy_free_buf;
......@@ -986,18 +1162,19 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0748 abort handler timed out waiting for "
"abort to complete: ret %#x, ID %d, LUN %d, "
"snum %#lx\n",
phba->brd_no, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
"%d (%d):0748 abort handler timed out waiting "
"for abort to complete: ret %#x, ID %d, "
"LUN %d, snum %#lx\n",
phba->brd_no, vport->vpi, ret,
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
}
out:
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0749 SCSI Layer I/O Abort Request "
"%d (%d):0749 SCSI Layer I/O Abort Request "
"Status x%x ID %d LUN %d snum %#lx\n",
phba->brd_no, ret, cmnd->device->id,
phba->brd_no, vport->vpi, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
return ret;
......@@ -1024,7 +1201,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
*/
while ( 1 ) {
while (1) {
if (!pnode)
goto out;
......@@ -1035,9 +1212,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0721 LUN Reset rport failure:"
" cnt x%x rdata x%p\n",
phba->brd_no, loopcnt, rdata);
"%d (%d):0721 LUN Reset rport "
"failure: cnt x%x rdata x%p\n",
phba->brd_no, vport->vpi,
loopcnt, rdata);
goto out;
}
pnode = rdata->pnode;
......@@ -1068,8 +1246,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
"%d (%d):0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n",
phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
iocb_status = lpfc_sli_issue_iocb_wait(phba,
......@@ -1103,7 +1282,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
cmnd->device->id, cmnd->device->lun,
0, LPFC_CTX_LUN);
loopcnt = 0;
while (cnt) {
while(cnt) {
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
if (++loopcnt
......@@ -1118,8 +1297,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0719 device reset I/O flush failure: cnt x%x\n",
phba->brd_no, cnt);
"%d (%d):0719 device reset I/O flush failure: "
"cnt x%x\n",
phba->brd_no, vport->vpi, cnt);
ret = FAILED;
}
......@@ -1128,10 +1308,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 SCSI layer issued device reset (%d, %d) "
"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
phba->brd_no, cmnd->device->id, cmnd->device->lun,
ret, cmd_status, cmd_result);
phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, ret, cmd_status, cmd_result);
out:
return ret;
......@@ -1184,8 +1364,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0700 Bus Reset on target %d failed\n",
phba->brd_no, i);
"%d (%d):0700 Bus Reset on target %d "
"failed\n",
phba->brd_no, vport->vpi, i);
err_count++;
break;
}
......@@ -1210,7 +1391,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, 0, LPFC_CTX_HOST);
loopcnt = 0;
while (cnt) {
while(cnt) {
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
if (++loopcnt
......@@ -1224,16 +1405,15 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
phba->brd_no, cnt, i);
"%d (%d):0715 Bus Reset I/O flush failure: "
"cnt x%x left x%x\n",
phba->brd_no, vport->vpi, cnt, i);
ret = FAILED;
}
lpfc_printf_log(phba,
KERN_ERR,
LOG_FCP,
"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
phba->brd_no, ret);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
phba->brd_no, vport->vpi, ret);
out:
return ret;
}
......@@ -1263,17 +1443,24 @@ lpfc_slave_alloc(struct scsi_device *sdev)
*/
total = phba->total_scsi_bufs;
num_to_alloc = phba->cfg_lun_queue_depth + 2;
if (total >= phba->cfg_hba_queue_depth) {
/* Allow some exchanges to be available always to complete discovery */
if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0704 At limitation of %d preallocated "
"command buffers\n", phba->brd_no, total);
"%d (%d):0704 At limitation of %d "
"preallocated command buffers\n",
phba->brd_no, vport->vpi, total);
return 0;
} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
/* Allow some exchanges to be available always to complete discovery */
} else if (total + num_to_alloc >
phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0705 Allocation request of %d command "
"buffers will exceed max of %d. Reducing "
"allocation request to %d.\n", phba->brd_no,
num_to_alloc, phba->cfg_hba_queue_depth,
"%d (%d):0705 Allocation request of %d "
"command buffers will exceed max of %d. "
"Reducing allocation request to %d.\n",
phba->brd_no, vport->vpi, num_to_alloc,
phba->cfg_hba_queue_depth,
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
......@@ -1282,8 +1469,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
scsi_buf = lpfc_new_scsi_buf(vport);
if (!scsi_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0706 Failed to allocate command "
"buffer\n", phba->brd_no);
"%d (%d):0706 Failed to allocate "
"command buffer\n",
phba->brd_no, vport->vpi);
break;
}
......@@ -1331,6 +1519,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
return;
}
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
......
......@@ -44,14 +44,15 @@
* This allows multiple uses of lpfc_msgBlk0311
* w/o perturbing log msg utility.
*/
#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
lpfc_printf_log(phba, \
KERN_INFO, \
LOG_MBOX | LOG_SLI, \
"%d:0311 Mailbox command x%x cannot issue " \
"Data: x%x x%x x%x\n", \
"%d (%d):0311 Mailbox command x%x cannot " \
"issue Data: x%x x%x x%x\n", \
phba->brd_no, \
mb->mbxCommand, \
pmbox->vport ? pmbox->vport->vpi : 0, \
pmbox->mb.mbxCommand, \
phba->pport->port_state, \
psli->sli_flag, \
flag)
......@@ -65,11 +66,10 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
/*
* SLI-2/SLI-3 provide different sized iocbs. Given a pointer to the start of
* the ring, and the slot number of the desired iocb entry, calc a pointer to
* that entry.
*/
/* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
* to the start of the ring, and the slot number of the
* desired iocb entry, calc a pointer to that entry.
*/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
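With SLI-3 the command and response IOCBs are no longer fixed at the old 32-byte SLI-2 size (the fixed-stride IOCB_ENTRY macro is dropped from lpfc_sli.h later in this patch), so the entry pointer has to be computed from the per-HBA entry size. A minimal standalone sketch of that stride arithmetic; the function name and the 64-byte figure are illustrative only, not the driver's.

#include <stddef.h>
#include <stdio.h>

/* Illustrative only: pointer to entry 'slot' of a ring whose entries are
 * 'iocb_size' bytes each (32 bytes for SLI-2, larger for SLI-3). */
static void *ring_entry(void *ring_base, size_t iocb_size, unsigned int slot)
{
        return (char *)ring_base + (size_t)slot * iocb_size;
}

int main(void)
{
        static char ring[4096];

        printf("slot 3, 32-byte stride -> offset %zu\n",
               (size_t)((char *)ring_entry(ring, 32, 3) - ring));
        printf("slot 3, 64-byte stride -> offset %zu\n",
               (size_t)((char *)ring_entry(ring, 64, 3) - ring));
        return 0;
}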
......@@ -229,13 +229,11 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
lpfc_config_ring(phba, i, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba,
KERN_ERR,
LOG_INIT,
"%d:0446 Adapter failed to init, "
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
phba->brd_no,
phba->brd_no, rc,
pmbox->mbxCommand,
pmbox->mbxStatus,
i);
......@@ -254,9 +252,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
list_add_tail(&piocb->list, &pring->txcmplq);
pring->txcmplq_cnt++;
if (unlikely(pring->ringno == LPFC_ELS_RING))
mod_timer(&piocb->vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov << 1));
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
if (!piocb->vport)
BUG();
else
mod_timer(&piocb->vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov << 1));
}
return 0;
}
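The txcmplq bookkeeping above now (re)arms the owning vport's ELS timeout whenever a non-abort ELS command is queued, with an expiry of twice R_A_TOV. A tiny standalone sketch of that expiry arithmetic; the HZ and R_A_TOV values are made up for illustration.

#include <stdio.h>

#define HZ 250                        /* illustrative tick rate, not the kernel's */

int main(void)
{
        unsigned long jiffies = 100000;   /* pretend "now" in ticks */
        unsigned int fc_ratov = 10;       /* example R_A_TOV in seconds */

        /* Same expression as above: expire 2 * R_A_TOV from now. */
        unsigned long expires = jiffies + HZ * (fc_ratov << 1);

        printf("ELS timeout would fire at tick %lu (%u s later)\n",
               expires, fc_ratov * 2);
        return 0;
}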
......@@ -311,8 +316,10 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
/* hbalock should already be held */
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
return NULL;
}
......@@ -399,7 +406,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Issue iocb command to adapter
*/
lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
wmb();
pring->stats.iocb_cmd++;
......@@ -520,14 +527,14 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
hbqp->next_hbqPutIdx = 0;
if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
uint32_t raw_index = readl(&phba->hbq_get[hbqno]);
uint32_t raw_index = phba->hbq_get[hbqno];
uint32_t getidx = le32_to_cpu(raw_index);
hbqp->local_hbqGetIdx = getidx;
if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI,
LOG_SLI | LOG_VPORT,
"%d:1802 HBQ %d: local_hbqGetIdx "
"%u is > than hbqp->entry_count %u\n",
phba->brd_no, hbqno,
......@@ -548,117 +555,121 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
uint32_t i;
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf;
if (!phba->hbq_buffer_pool)
return;
/* Return all memory used by all HBQs */
for (i = 0; i < phba->hbq_buffer_count; i++) {
lpfc_hbq_free(phba, phba->hbq_buffer_pool[i].dbuf.virt,
phba->hbq_buffer_pool[i].dbuf.phys);
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbq_buffer_list, list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
kfree(hbq_buf);
}
kfree(phba->hbq_buffer_pool);
phba->hbq_buffer_pool = NULL;
}
static void
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf_desc)
struct hbq_dmabuf *hbq_buf)
{
struct lpfc_hbq_entry *hbqe;
dma_addr_t physaddr = hbq_buf->dbuf.phys;
/* Get next HBQ entry slot to use */
hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
if (hbqe) {
struct hbq_s *hbqp = &phba->hbqs[hbqno];
hbqe->bde.addrHigh = putPaddrHigh(hbq_buf_desc->dbuf.phys);
hbqe->bde.addrLow = putPaddrLow(hbq_buf_desc->dbuf.phys);
hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
hbqe->bde.tus.f.bdeSize = FCELSSIZE;
hbqe->bde.tus.f.bdeFlags = 0;
hbqe->buffer_tag = hbq_buf_desc->tag;
/* Sync SLIM */
hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
/* Sync SLIM */
hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
/* flush */
/* flush */
readl(phba->hbq_put + hbqno);
phba->hbq_buff_count++;
list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
}
}
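The HBQ entry written above carries the buffer's DMA address as separate high/low 32-bit words. A standalone sketch of that split, using stand-ins for the driver's putPaddrHigh()/putPaddrLow() helpers; the address value is an example.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins mirroring the driver's putPaddrLow()/putPaddrHigh() split. */
#define put_paddr_low(a)  ((uint32_t)(0xffffffffu & (uint64_t)(a)))
#define put_paddr_high(a) ((uint32_t)((uint64_t)(a) >> 32))

int main(void)
{
        uint64_t physaddr = 0x0000000123456000ULL;   /* example DMA address */

        /* These two words go into the BDE; the code above additionally
         * runs them through le32_to_cpu() before the firmware sees them. */
        printf("addrHigh=0x%08x addrLow=0x%08x\n",
               put_paddr_high(physaddr), put_paddr_low(physaddr));
        return 0;
}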
static void
lpfc_sli_fill_hbq(struct lpfc_hba *phba, uint32_t hbqno, uint32_t buffer_index)
{
struct hbq_dmabuf *hbq_buf_desc;
uint32_t i;
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 200,
.mask_count = 0,
.profile = 0,
.ring_mask = 1 << LPFC_ELS_RING,
.buffer_count = 0,
.init_count = 20,
.add_count = 5,
};
for (i = 0; i < phba->hbqs[hbqno].entry_count; i++) {
/* Search hbqbufq, from the beginning,
* looking for an unused entry
*/
phba->hbq_buffer_pool[buffer_index + i].tag |= hbqno << 16;
hbq_buf_desc = phba->hbq_buffer_pool + buffer_index + i;
lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf_desc);
}
}
static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
};
int
lpfc_sli_hbqbuf_fill_hbq(struct lpfc_hba *phba)
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
return 0;
}
uint32_t i, start, end;
struct hbq_dmabuf *hbq_buffer;
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba)
{
uint32_t buffer_index = 0;
uint32_t hbqno;
start = lpfc_hbq_defs[hbqno]->buffer_count;
end = count + lpfc_hbq_defs[hbqno]->buffer_count;
if (end > lpfc_hbq_defs[hbqno]->entry_count) {
end = lpfc_hbq_defs[hbqno]->entry_count;
}
/* Populate HBQ entries */
for (hbqno = 0; hbqno < phba->hbq_count; ++hbqno) {
/* Find ring associated with HBQ */
lpfc_sli_fill_hbq(phba, hbqno, buffer_index);
buffer_index += phba->hbqs[hbqno].entry_count;
for (i = start; i < end; i++) {
hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
GFP_KERNEL);
if (!hbq_buffer)
return 1;
hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
&hbq_buffer->dbuf.phys);
if (hbq_buffer->dbuf.virt == NULL)
return 1;
hbq_buffer->tag = (i | (hbqno << 16));
lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
lpfc_hbq_defs[hbqno]->buffer_count++;
}
return 0;
}
struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
if ((tag & 0xffff) < phba->hbq_buffer_count)
return phba->hbq_buffer_pool + (tag & 0xffff);
return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->add_count));
}
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI,
"%d:1803 Bad hbq tag. Data: x%x x%x\n",
phba->brd_no, tag,
phba->hbq_buffer_count);
return NULL;
int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->init_count));
}
void
lpfc_sli_hbqbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t phys)
struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
uint32_t i, hbqno;
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
for (i = 0; i < phba->hbq_buffer_count; i++) {
/* Search hbqbufq, from the beginning, looking for a match on
phys */
if (phba->hbq_buffer_pool[i].dbuf.phys == phys) {
hbqno = phba->hbq_buffer_pool[i].tag >> 16;
lpfc_sli_hbq_to_firmware(phba, hbqno,
phba->hbq_buffer_pool + i);
return;
list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
if ((hbq_buf->tag & 0xffff) == tag) {
return hbq_buf;
}
}
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI,
"%d:1804 Cannot find virtual addr for "
"mapped buf. Data x%llx\n",
phba->brd_no, (unsigned long long) phys);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
"%d:1803 Bad hbq tag. Data: x%x x%x\n",
phba->brd_no, tag,
lpfc_hbq_defs[tag >> 16]->buffer_count);
return NULL;
}
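Buffers handed to the firmware are identified by a 32-bit tag: the HBQ number in the upper 16 bits and the buffer index in the lower 16, which is what lpfc_sli_hbqbuf_find() unpacks above. A standalone sketch of the packing and unpacking; the sample values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Same packing as hbq_buffer->tag = (i | (hbqno << 16)) above. */
static uint32_t hbq_tag(uint32_t hbqno, uint32_t index)
{
        return index | (hbqno << 16);
}

int main(void)
{
        uint32_t tag = hbq_tag(0, 42);

        printf("tag=0x%08x -> hbqno=%u index=%u\n",
               tag, tag >> 16, tag & 0xffff);
        return 0;
}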
void
......@@ -723,6 +734,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_FLASH_WR_ULA:
case MBX_SET_DEBUG:
case MBX_LOAD_EXP_ROM:
case MBX_REG_VPI:
case MBX_UNREG_VPI:
ret = mbxCommand;
break;
default:
......@@ -770,8 +783,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->mb.mbxStatus) {
rpi = pmb->mb.un.varWords[0];
lpfc_unreg_login(phba, rpi, pmb);
pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
......@@ -784,60 +797,25 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
MAILBOX_t *mbox, *pmbox;
MAILBOX_t *pmbox;
LPFC_MBOXQ_t *pmb;
int i, rc;
uint32_t process_next;
unsigned long iflags;
/* We should only get here if we are in SLI2 mode */
if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
return 1;
}
int rc;
LIST_HEAD(cmplq);
phba->sli.slistat.mbox_event++;
/* Get a Mailbox buffer to setup mailbox commands for callback */
if ((pmb = phba->sli.mbox_active)) {
pmbox = &pmb->mb;
mbox = &phba->slim2p->mbx;
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
/* Sanity check to ensure the host owns the mailbox */
if (pmbox->mbxOwner != OWN_HOST) {
/* Lets try for a while */
for (i = 0; i < 10240; i++) {
/* First copy command data */
lpfc_sli_pcimem_bcopy(mbox, pmbox,
sizeof (uint32_t));
if (pmbox->mbxOwner == OWN_HOST)
goto mbout;
}
/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
<status> */
lpfc_printf_log(phba,
KERN_WARNING,
LOG_MBOX | LOG_SLI,
"%d:0304 Stray Mailbox Interrupt "
"mbxCommand x%x mbxStatus x%x\n",
phba->brd_no,
pmbox->mbxCommand,
pmbox->mbxStatus);
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
return 1;
}
/* Get all completed mailbox buffers into the cmplq */
spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
spin_unlock_irq(&phba->hbalock);
mbout:
del_timer_sync(&phba->sli.mbox_tmo);
/* Get a Mailbox buffer to setup mailbox commands for callback */
do {
list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
if (pmb == NULL)
break;
spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
pmbox = &pmb->mb;
/*
* It is a fatal error if unknown mbox command completion.
......@@ -846,33 +824,33 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
MBX_SHUTDOWN) {
/* Unknown mailbox command completion */
lpfc_printf_log(phba,
KERN_ERR,
LOG_MBOX | LOG_SLI,
"%d:0323 Unknown Mailbox command %x Cmpl\n",
phba->brd_no,
pmbox->mbxCommand);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d (%d):0323 Unknown Mailbox command "
"%x Cmpl\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand);
phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
return 0;
continue;
}
phba->sli.mbox_active = NULL;
if (pmbox->mbxStatus) {
phba->sli.slistat.mbox_stat_err++;
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
lpfc_printf_log(phba,
KERN_INFO,
LOG_MBOX | LOG_SLI,
"%d:0305 Mbox cmd cmpl error - "
"RETRYing Data: x%x x%x x%x x%x\n",
phba->brd_no,
pmbox->mbxCommand,
pmbox->mbxStatus,
pmbox->un.varWords[0],
phba->pport->port_state);
lpfc_printf_log(phba, KERN_INFO,
LOG_MBOX | LOG_SLI,
"%d (%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi :0,
pmbox->mbxCommand,
pmbox->mbxStatus,
pmbox->un.varWords[0],
pmb->vport->port_state);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
spin_lock_irq(&phba->hbalock);
......@@ -880,17 +858,16 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_SUCCESS)
return 0;
continue;
}
}
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_MBOX | LOG_SLI,
"%d:0307 Mailbox cmd x%x Cmpl x%p "
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
......@@ -903,39 +880,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmbox->un.varWords[6],
pmbox->un.varWords[7]);
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
}
}
do {
process_next = 0; /* by default don't loop */
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Process next mailbox command if there is one */
if ((pmb = lpfc_mbox_get(phba))) {
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
pmb->mbox_cmpl(phba,pmb);
process_next = 1;
continue; /* loop back */
}
} else {
spin_unlock_irq(&phba->hbalock);
/* Turn on IOCB processing */
for (i = 0; i < phba->sli.num_rings; i++)
lpfc_sli_turn_on_ring(phba, i);
}
} while (1);
return 0;
}
} while (process_next);
static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
return 0;
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (hbq_entry == NULL)
return NULL;
list_del(&hbq_entry->dbuf.list);
new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
if (new_hbq_entry == NULL)
return &hbq_entry->dbuf;
new_hbq_entry->dbuf = hbq_entry->dbuf;
new_hbq_entry->tag = -1;
hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
if (hbq_entry->dbuf.virt == NULL) {
kfree(new_hbq_entry);
return &hbq_entry->dbuf;
}
lpfc_sli_free_hbq(phba, hbq_entry);
return &new_hbq_entry->dbuf;
}
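lpfc_sli_replace_hbqbuff() hands the filled receive buffer up the stack while immediately giving the emptied queue slot a fresh DMA buffer; if either allocation fails it simply returns the filled buffer without reposting. A user-space analogy of that swap, with malloc() standing in for the DMA pool; all names are illustrative and the failure handling is simplified.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rxbuf {
        void *data;
};

/* Analogy only: detach the filled payload for the caller and refill the
 * posted slot straight away. */
static void *detach_and_refill(struct rxbuf *slot, size_t size)
{
        void *filled = slot->data;
        void *fresh = malloc(size);          /* lpfc_hbq_alloc() stand-in */

        if (!fresh)
                return NULL;                 /* keep the original buffer posted */
        slot->data = fresh;
        return filled;
}

int main(void)
{
        struct rxbuf slot = { .data = malloc(64) };
        void *payload;

        strcpy(slot.data, "unsolicited frame");
        payload = detach_and_refill(&slot, 64);
        if (payload)
                printf("delivered: \"%s\"\n", (char *)payload);
        free(payload);
        free(slot.data);
        return 0;
}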
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
......@@ -962,14 +935,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Firmware Workaround */
if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
w5p->hcsw.Rctl = Rctl;
w5p->hcsw.Type = Type;
}
}
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0)
saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
irsp->un.ulpWord[3]);
if (irsp->ulpBdeCount == 2)
saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
irsp->un.ulpWord[15]);
}
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
......@@ -997,17 +980,15 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unexpected Rctl / Type received */
/* Ring <ringno> handler: unexpected
Rctl <Rctl> Type <Type> received */
lpfc_printf_log(phba,
KERN_WARNING,
LOG_SLI,
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0313 Ring %d handler: unexpected Rctl x%x "
"Type x%x received \n",
"Type x%x received\n",
phba->brd_no,
pring->ringno,
Rctl,
Type);
}
return(1);
return 1;
}
static struct lpfc_iocbq *
......@@ -1022,7 +1003,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del(&cmd_iocb->list);
list_del_init(&cmd_iocb->list);
pring->txcmplq_cnt--;
return cmd_iocb;
}
......@@ -1079,18 +1060,18 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Ring <ringno> handler: unexpected completion IoTag
* <IoTag>
*/
lpfc_printf_log(phba,
KERN_WARNING,
LOG_SLI,
"%d:0322 Ring %d handler: unexpected "
"completion IoTag x%x Data: x%x x%x x%x x%x\n",
phba->brd_no,
pring->ringno,
saveq->iocb.ulpIoTag,
saveq->iocb.ulpStatus,
saveq->iocb.un.ulpWord[4],
saveq->iocb.ulpCommand,
saveq->iocb.ulpContext);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d (%d):0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
cmdiocbp->vport->vpi,
pring->ringno,
saveq->iocb.ulpIoTag,
saveq->iocb.ulpStatus,
saveq->iocb.un.ulpWord[4],
saveq->iocb.ulpCommand,
saveq->iocb.ulpContext);
}
}
......@@ -1103,7 +1084,6 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
&phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
......@@ -1123,8 +1103,10 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
/* hbalock should already be held */
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
return;
}
......@@ -1171,7 +1153,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
sizeof(IOCB_t));
phba->iocb_rsp_size);
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
......@@ -1342,16 +1324,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
/*
* If resource errors reported from HBA, reduce
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_adjust_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
irsp->un.ulpWord[0], irsp->un.ulpWord[1],
irsp->un.ulpWord[2], irsp->un.ulpWord[3],
irsp->un.ulpWord[4], irsp->un.ulpWord[5],
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7));
"%d:0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7));
}
switch (type) {
......@@ -1365,7 +1361,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0333 IOCB cmd 0x%x"
" processed. Skipping"
" completion\n", phba->brd_no,
" completion\n",
phba->brd_no,
irsp->ulpCommand);
break;
}
......@@ -1402,11 +1399,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type, irsp->ulpCommand,
irsp->ulpStatus, irsp->ulpIoTag,
irsp->ulpContext);
"%d:0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type,
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
break;
}
......@@ -1446,7 +1445,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
return rc;
}
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
......@@ -1484,8 +1482,8 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no,
pring->ringno, portRspPut, portRspMax);
phba->brd_no, pring->ringno, portRspPut,
portRspMax);
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irqrestore(&phba->hbalock, iflag);
......@@ -1551,6 +1549,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
pring->stats.iocb_rsp++;
/*
* If resource errors reported from HBA, reduce
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_adjust_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
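Both ring handlers now react to IOSTAT_LOCAL_REJECT/IOERR_NO_RESOURCES by dropping the lock and calling lpfc_adjust_queue_depth(), backing off SCSI queue depths when the adapter runs out of resources. That helper is not part of this hunk; the sketch below is only a guess at the general shape of such a rate-limited ramp-down, with made-up interval and step values.

#include <stdio.h>
#include <time.h>

static int queue_depth = 30;
static time_t last_ramp_down;

/* Illustrative rate-limited ramp-down: shrink the depth at most once per
 * interval, no matter how many resource errors arrive in a burst. */
static void ramp_down(time_t now)
{
        if (now - last_ramp_down < 1)        /* example: once per second */
                return;
        if (queue_depth > 1)
                queue_depth--;
        last_ramp_down = now;
}

int main(void)
{
        time_t now = time(NULL);

        ramp_down(now);
        ramp_down(now);                      /* suppressed by the rate limit */
        printf("queue depth now %d\n", queue_depth);
        return 0;
}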
if (irsp->ulpStatus) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
......@@ -1634,16 +1643,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"%d:0335 Unknown IOCB command "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0335 Unknown IOCB "
"command Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
}
......@@ -1656,6 +1664,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
}
__lpfc_sli_release_iocbq(phba, saveq);
}
rspiocbp = NULL;
}
/*
......@@ -1668,7 +1677,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
}
} /* while (pring->rspidx != portRspPut) */
if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
/* At least one response entry has been freed */
pring->stats.iocb_rsp_full++;
/* SET RxRE_RSP in Chip Att register */
......@@ -1700,6 +1709,10 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd = NULL;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
/* Error everything on txq and txcmplq
* First do the txq.
*/
......@@ -1716,7 +1729,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del(&iocb->list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
......@@ -1757,7 +1770,7 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
if (i == 15) {
/* Do post */
phba->pport->port_state = LPFC_STATE_UNKNOWN;
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
......@@ -1862,8 +1875,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
/* Kill HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0329 Kill HBA Data: x%x x%x\n",
phba->brd_no, phba->pport->port_state, psli->sli_flag);
"%d:0329 Kill HBA Data: x%x x%x\n",
phba->brd_no, phba->pport->port_state, psli->sli_flag);
if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL)) == 0)
......@@ -2087,7 +2100,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (i == 15) {
/* Do post */
phba->pport->port_state = LPFC_STATE_UNKNOWN;
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
......@@ -2117,55 +2130,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
static struct hbq_dmabuf *
lpfc_alloc_hbq_buffers(struct lpfc_hba *phba, int count)
{
struct hbq_dmabuf *hbq_buffer_pool;
int i;
hbq_buffer_pool = kmalloc(count * sizeof(struct hbq_dmabuf),
GFP_KERNEL);
if (!hbq_buffer_pool)
goto out;
for (i = 0; i < count; ++i) {
hbq_buffer_pool[i].dbuf.virt =
lpfc_hbq_alloc(phba, MEM_PRI,
&hbq_buffer_pool[i].dbuf.phys);
if (hbq_buffer_pool[i].dbuf.virt == NULL)
goto alloc_failed;
hbq_buffer_pool[i].tag = i;
}
goto out;
alloc_failed:
while (--i >= 0)
lpfc_hbq_free(phba, hbq_buffer_pool[i].dbuf.virt,
hbq_buffer_pool[i].dbuf.phys);
kfree(hbq_buffer_pool);
hbq_buffer_pool = NULL;
out:
phba->hbq_buffer_pool = hbq_buffer_pool;
return hbq_buffer_pool;
}
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 1200,
.mask_count = 0,
.profile = 0,
.ring_mask = 1 << LPFC_ELS_RING,
};
static struct lpfc_hbq_init *lpfc_hbq_definitions[] = {
&lpfc_els_hbq,
};
static int
lpfc_sli_hbq_count(void)
{
return ARRAY_SIZE(lpfc_hbq_definitions);
return ARRAY_SIZE(lpfc_hbq_defs);
}
static int
......@@ -2176,7 +2144,7 @@ lpfc_sli_hbq_entry_count(void)
int i;
for (i = 0; i < hbq_count; ++i)
count += lpfc_hbq_definitions[i]->entry_count;
count += lpfc_hbq_defs[i]->entry_count;
return count;
}
......@@ -2194,18 +2162,10 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
MAILBOX_t *pmbox;
uint32_t hbqno;
uint32_t hbq_entry_index;
uint32_t hbq_buffer_count;
/* count hbq buffers */
hbq_buffer_count = lpfc_sli_hbq_entry_count();
if (!lpfc_alloc_hbq_buffers(phba, hbq_buffer_count))
return -ENOMEM;
phba->hbq_buffer_count = hbq_buffer_count;
/* Get a Mailbox buffer to setup mailbox
* commands for HBA initialization
*/
/* Get a Mailbox buffer to setup mailbox
* commands for HBA initialization
*/
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
......@@ -2222,9 +2182,9 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
phba->hbqs[hbqno].hbqPutIdx = 0;
phba->hbqs[hbqno].local_hbqGetIdx = 0;
phba->hbqs[hbqno].entry_count =
lpfc_hbq_definitions[hbqno]->entry_count;
lpfc_config_hbq(phba, lpfc_hbq_definitions[hbqno],
hbq_entry_index, pmb);
lpfc_hbq_defs[hbqno]->entry_count;
lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
pmb);
hbq_entry_index += phba->hbqs[hbqno].entry_count;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
......@@ -2232,7 +2192,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
mbxStatus <status>, ring <num> */
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI,
LOG_SLI | LOG_VPORT,
"%d:1805 Adapter failed to init. "
"Data: x%x x%x x%x\n",
phba->brd_no, pmbox->mbxCommand,
......@@ -2240,17 +2200,18 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
mempool_free(pmb, phba->mbox_mem_pool);
/* Free all HBQ memory */
lpfc_sli_hbqbuf_free_all(phba);
return ENXIO;
}
}
phba->hbq_count = hbq_count;
/* Initially populate or replenish the HBQs */
lpfc_sli_hbqbuf_fill_hbqs(phba);
mempool_free(pmb, phba->mbox_mem_pool);
/* Initially populate or replenish the HBQs */
for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
return -ENOMEM;
}
return 0;
}
......@@ -2271,7 +2232,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
phba->pport->port_state = LPFC_STATE_UNKNOWN;
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
rc = lpfc_sli_chipset_init(phba);
......@@ -2301,20 +2262,20 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0442 Adapter failed to init, "
"mbxCmd x%x CONFIG_PORT, mbxStatus "
"x%x Data: x%x\n",
phba->brd_no, pmb->mb.mbxCommand,
pmb->mb.mbxStatus, 0);
"%d:0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
phba->brd_no, pmb->mb.mbxCommand,
pmb->mb.mbxStatus, 0);
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
rc = -ENXIO;
} else {
done = 1;
/* DBG: Do we need max_vpi, reg_vpi for that matter
phba->max_vpi = 0;
*/
phba->max_vpi = (phba->max_vpi &&
pmb->mb.un.varCfgPort.gmv) != 0
? pmb->mb.un.varCfgPort.max_vpi
: 0;
}
}
......@@ -2324,13 +2285,13 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
}
if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
(!pmb->mb.un.varCfgPort.cMA)) {
(!pmb->mb.un.varCfgPort.cMA)) {
rc = -ENXIO;
goto do_prep_failed;
}
return rc;
do_prep_failed:
do_prep_failed:
mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
......@@ -2339,17 +2300,24 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
int mode = 3;
int mode = 3;
switch (lpfc_sli_mode) {
case 2:
if (lpfc_npiv_enable) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1824 NPIV enabled: Override lpfc_sli_mode "
"parameter (%d) to auto (0).\n",
phba->brd_no, lpfc_sli_mode);
break;
}
mode = 2;
break;
case 0:
case 3:
break;
default:
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1819 Unrecognized lpfc_sli_mode "
"parameter: %d.\n",
phba->brd_no, lpfc_sli_mode);
......@@ -2359,7 +2327,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
rc = lpfc_do_config_port(phba, mode);
if (rc && lpfc_sli_mode == 3)
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1820 Unable to select SLI-3. "
"Not supported by adapter.\n",
phba->brd_no);
......@@ -2377,18 +2345,18 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
} else {
phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
phba->sli3_options = 0x0;
phba->sli3_options = 0;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0444 Firmware in SLI %x mode.\n",
phba->brd_no, phba->sli_rev);
"%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
phba->brd_no, phba->sli_rev, phba->max_vpi);
rc = lpfc_sli_ring_map(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
/* Init HBQs */
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
......@@ -2404,7 +2372,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
return rc;
lpfc_sli_hba_setup_error:
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0445 Firmware initialization failed\n",
......@@ -2428,19 +2396,21 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
void
lpfc_mbox_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) phba;
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflag;
uint32_t tmo_posted;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
tmo_posted = (phba->pport->work_port_events & WORKER_MBOX_TMO);
tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
if (!tmo_posted) {
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
}
......@@ -2458,12 +2428,13 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
phba->brd_no,
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
phba->sli.mbox_active);
"%d:0310 Mailbox command x%x timeout Data: x%x x%x "
"x%p\n",
phba->brd_no,
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
phba->sli.mbox_active);
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
......@@ -2510,10 +2481,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
void __iomem *to_slim;
if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if(!pmbox->vport) {
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX,
LOG_MBOX | LOG_VPORT,
"%d:1806 Mbox x%x failed. No vport\n",
phba->brd_no,
pmbox->mb.mbxCommand);
......@@ -2522,12 +2493,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
}
}
/* If the PCI channel is in offline state, do not post mbox. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return MBX_NOT_FINISHED;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
psli = &phba->sli;
mb = &pmbox->mb;
status = MBX_SUCCESS;
......@@ -2535,14 +2509,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
return MBX_NOT_FINISHED;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
return MBX_NOT_FINISHED;
}
......@@ -2556,14 +2530,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED;
}
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED;
}
......@@ -2589,10 +2563,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mbox cmd issue - BUSY */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
phba->brd_no,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
"%d (%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
phba->brd_no,
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
......@@ -2626,7 +2602,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED;
}
/* timeout active mbox command */
......@@ -2636,10 +2612,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mailbox cmd <cmd> issue */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
phba->brd_no,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
"%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_cmd++;
evtctr = psli->slistat.mbox_event;
......@@ -2654,7 +2631,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
MAILBOX_CMD_SIZE);
MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
......@@ -2756,14 +2733,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
MAILBOX_CMD_SIZE);
MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
pmbox->context2) {
lpfc_memcpy_from_slim((void *) pmbox->context2,
lpfc_memcpy_from_slim((void *)pmbox->context2,
phba->MBslimaddr + DMP_RSP_OFFSET,
mb->un.varDmp.word_cnt);
}
......@@ -2780,17 +2757,16 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
return status;
}
/*
* Caller needs to hold lock.
*/
static int
lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
unsigned long iflags;
/* Insert the caller's iocb in the txq tail for later processing. */
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&piocb->list, &pring->txq);
pring->txq_cnt++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
......@@ -2809,14 +2785,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return nextiocb;
}
/*
* Lockless version of lpfc_sli_issue_iocb.
*/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_iocbq *nextiocb;
unsigned long iflags;
IOCB_t *iocb;
if (piocb->iocb_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"%d:1807 IOCB x%x failed. No vport\n",
phba->brd_no,
piocb->iocb.ulpCommand);
dump_stack();
return IOCB_ERROR;
}
/* If the PCI channel is in offline state, do not post iocbs. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return IOCB_ERROR;
......@@ -2862,10 +2853,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* attention events.
*/
} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
!(phba->sli.sli_flag & LPFC_PROCESS_LA)))
!(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
goto iocb_busy;
}
spin_lock_irqsave(&phba->hbalock, iflags);
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
......@@ -2874,7 +2865,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_sli_update_ring(phba, pring);
else
lpfc_sli_update_full_ring(phba, pring);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!piocb)
return IOCB_SUCCESS;
......@@ -2882,20 +2872,33 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto out_busy;
iocb_busy:
spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_cmd_delay++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
out_busy:
if (!(flag & SLI_IOCB_RET_IOCB)) {
lpfc_sli_ringtx_put(phba, pring, piocb);
__lpfc_sli_ringtx_put(phba, pring, piocb);
return IOCB_SUCCESS;
}
return IOCB_BUSY;
}
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
{
unsigned long iflags;
int rc;
spin_lock_irqsave(&phba->hbalock, iflags);
rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return rc;
}
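The split above follows the usual kernel convention: __lpfc_sli_issue_iocb() assumes the caller already holds hbalock (so paths such as the abort handler can call it under their own locking), while lpfc_sli_issue_iocb() is the take-the-lock wrapper for everyone else. A small standalone sketch of the same pattern using a pthread mutex; the names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int slots_used;

/* Lockless core: the caller must already hold ring_lock (the role the
 * double-underscore prefix marks in the driver). */
static int __issue(int n)
{
        slots_used += n;
        return 0;
}

/* Locking wrapper for callers that do not hold the lock themselves. */
static int issue(int n)
{
        int rc;

        pthread_mutex_lock(&ring_lock);
        rc = __issue(n);
        pthread_mutex_unlock(&ring_lock);
        return rc;
}

int main(void)
{
        issue(1);
        printf("slots used: %d\n", slots_used);
        return 0;
}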
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
......@@ -2960,14 +2963,14 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_ctr = 0;
pring->iotag_max =
(phba->cfg_hba_queue_depth * 2);
(phba->cfg_hba_queue_depth * 2);
pring->fast_iotag = pring->iotag_max;
pring->num_mask = 0;
break;
......@@ -2976,11 +2979,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_max = phba->cfg_hba_queue_depth;
pring->num_mask = 0;
break;
......@@ -2989,11 +2992,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->fast_iotag = 0;
pring->iotag_ctr = 0;
pring->iotag_max = 4096;
......@@ -3002,30 +3005,30 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->prt[0].rctl = FC_ELS_REQ;
pring->prt[0].type = FC_ELS_DATA;
pring->prt[0].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
lpfc_els_unsol_event;
pring->prt[1].profile = 0; /* Mask 1 */
pring->prt[1].rctl = FC_ELS_RSP;
pring->prt[1].type = FC_ELS_DATA;
pring->prt[1].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
lpfc_els_unsol_event;
pring->prt[2].profile = 0; /* Mask 2 */
/* NameServer Inquiry */
pring->prt[2].rctl = FC_UNSOL_CTL;
/* NameServer */
pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
pring->prt[2].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
lpfc_ct_unsol_event;
pring->prt[3].profile = 0; /* Mask 3 */
/* NameServer response */
pring->prt[3].rctl = FC_SOL_CTL;
/* NameServer */
pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
lpfc_ct_unsol_event;
break;
}
totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
(pring->numRiocb * pring->sizeRiocb);
(pring->numRiocb * pring->sizeRiocb);
}
if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
......@@ -3051,6 +3054,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
INIT_LIST_HEAD(&psli->mboxq);
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
......@@ -3067,6 +3071,64 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
return 1;
}
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *icmd = NULL;
int i;
unsigned long flags = 0;
uint16_t prev_pring_flag;
lpfc_cleanup_discovery_resources(vport);
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
prev_pring_flag = pring->flag;
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/*
* Error everything on the txq since these iocbs have not been
* given to the FW yet.
*/
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
if (iocb->vport != vport)
continue;
list_del_init(&iocb->list);
pring->txq_cnt--;
if (iocb->iocb_cmpl) {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
spin_unlock_irqrestore(&phba->hbalock, flags);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irqsave(&phba->hbalock, flags);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
list) {
if (iocb->vport != vport)
continue;
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
pring->flag = prev_pring_flag;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}
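lpfc_sli_host_down() tears down a single vport: iocbs still on the txq that belong to that vport are completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, and anything already with the firmware on the txcmplq gets an ABTS. A toy sketch of the per-owner filtering idea only; the real list handling and locking are omitted and the error value is a stand-in.

#include <stdio.h>

struct req {
        int vport_id;
        int status;
        struct req *next;
};

/* Toy version of the walk above: fail only the requests owned by the
 * port that is going down, leave the others untouched. */
static void fail_vport_reqs(struct req *head, int vport_id, int err)
{
        for (; head; head = head->next)
                if (head->vport_id == vport_id)
                        head->status = err;
}

int main(void)
{
        struct req c = { .vport_id = 2 };
        struct req b = { .vport_id = 1, .next = &c };
        struct req a = { .vport_id = 1, .next = &b };

        fail_vport_reqs(&a, 1, -5);          /* -5 stands in for IOERR_SLI_DOWN */
        printf("statuses: %d %d %d\n", a.status, b.status, c.status);
        return 0;
}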
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
......@@ -3081,6 +3143,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
lpfc_hba_down_prep(phba);
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
......@@ -3097,9 +3161,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
......@@ -3112,34 +3175,33 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
spin_lock_irqsave(&phba->hbalock, flags);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
spin_lock(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
spin_unlock(&phba->pport->work_port_lock);
spin_lock_irqsave(&phba->hbalock, flags);
pmb = psli->mbox_active;
if (pmb) {
if (psli->mbox_active) {
list_add_tail(&psli->mbox_active->list, &completions);
psli->mbox_active = NULL;
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
if (pmb->mbox_cmpl) {
pmb->mbox_cmpl(phba,pmb);
}
}
/* Return any pending mbox cmds */
while ((pmb = lpfc_mbox_get(phba)) != NULL) {
/* Return any pending or completed mbox cmds */
list_splice_init(&phba->sli.mboxq, &completions);
list_splice_init(&phba->sli.mboxq_cmpl, &completions);
INIT_LIST_HEAD(&psli->mboxq);
INIT_LIST_HEAD(&psli->mboxq_cmpl);
spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&completions)) {
list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
if (pmb->mbox_cmpl) {
pmb->mbox_cmpl(phba,pmb);
}
}
INIT_LIST_HEAD(&psli->mboxq);
/* Free all HBQ memory */
lpfc_sli_hbqbuf_free_all(phba);
return 1;
}
......@@ -3196,7 +3258,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%p x%p x%x\n",
phba->brd_no, pring->ringno, (unsigned long long) phys,
phba->brd_no, pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
}
......@@ -3207,7 +3269,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
struct lpfc_iocbq *abort_iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
abort_iocb = NULL;
......@@ -3220,11 +3282,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0327 Cannot abort els iocb %p"
" with tag %x context %x\n",
phba->brd_no, abort_iocb,
abort_iotag, abort_context);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
"%d:0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
phba->brd_no, abort_iocb, abort_iotag,
abort_context, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/*
* make sure we have the right iocbq before taking it
......@@ -3235,23 +3299,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
else {
list_del(&abort_iocb->list);
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
if (rsp_ab_iocb == NULL)
lpfc_sli_release_iocbq(phba, abort_iocb);
else {
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
rsp_ab_iocb->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
rsp_ab_iocb->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb,
rsp_ab_iocb);
lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
}
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
......@@ -3259,6 +3314,23 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
"x%x x%x x%x\n",
phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
......@@ -3269,22 +3341,30 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
/* There are certain command types we don't want
* to abort.
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
icmd = &cmdiocb->iocb;
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN)
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
/* If we're unloading, interrupts are disabled so we
* need to cleanup the iocb here.
/* If we're unloading, don't abort the iocb, but change the callback so
* that nothing happens when it finishes.
*/
if (vport->load_flag & FC_UNLOADING)
if (vport->load_flag & FC_UNLOADING) {
if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
else
cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
goto abort_iotag_exit;
}
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = lpfc_sli_get_iocbq(phba);
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
......@@ -3308,11 +3388,12 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0339 Abort xri x%x, original iotag x%x, abort "
"cmd iotag x%x\n",
phba->brd_no, iabt->un.acxri.abortContextTag,
"%d (%d):0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
phba->brd_no, vport->vpi,
iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
abort_iotag_exit:
/*
......@@ -3471,6 +3552,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
* lpfc_sli_issue_call since the wake routine sets a unique value and by
* definition this is a wait function.
*/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
......@@ -3558,9 +3640,8 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
int retval;
/* The caller must leave context1 empty. */
if (pmboxq->context1 != 0) {
if (pmboxq->context1 != 0)
return MBX_NOT_FINISHED;
}
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
......@@ -3630,6 +3711,10 @@ lpfc_intr_handler(int irq, void *dev_id)
int i;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
LPFC_MBOXQ_t *pmb;
int rc;
/*
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
......@@ -3729,10 +3814,71 @@ lpfc_intr_handler(int irq, void *dev_id)
phba->pport->stopped = 1;
}
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active)) {
pmb = phba->sli.mbox_active;
pmbox = &pmb->mb;
mbox = &phba->slim2p->mbx;
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
if (pmbox->mbxOwner != OWN_HOST) {
/*
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
*/
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
LOG_SLI,
"%d (%d):0304 Stray Mailbox "
"Interrupt mbxCommand x%x "
"mbxStatus x%x\n",
phba->brd_no,
(pmb->vport
? pmb->vport->vpi
: 0),
pmbox->mbxCommand,
pmbox->mbxStatus);
}
del_timer_sync(&phba->sli.mbox_tmo);
spin_lock(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock(&phba->pport->work_port_lock);
phba->sli.mbox_active = NULL;
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
}
lpfc_mbox_cmpl_put(phba, pmb);
}
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
send_next_mbox:
spin_lock(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmb = lpfc_mbox_get(phba);
spin_unlock(&phba->hbalock);
/* Process next mailbox command if there is one */
if (pmb != NULL) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
lpfc_mbox_cmpl_put(phba, pmb);
goto send_next_mbox;
}
} else {
/* Turn on IOCB processing */
for (i = 0; i < phba->sli.num_rings; i++)
lpfc_sli_turn_on_ring(phba, i);
}
}
spin_lock(&phba->hbalock);
phba->work_ha |= work_ha_copy;
if (phba->work_wait)
wake_up(phba->work_wait);
lpfc_worker_wake_up(phba);
spin_unlock(&phba->hbalock);
}
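The mailbox completion handling moved into the interrupt path relies on the ownership handshake: the host only consumes a completion once the adapter has set mbxOwner back to OWN_HOST, otherwise the interrupt is logged as stray. A standalone sketch of that handshake; OWN_CHIP, the field layout, and the values are illustrative.

#include <stdio.h>

enum { OWN_HOST = 0, OWN_CHIP = 1 };    /* illustrative values only */

struct mailbox {
        int owner;
        int command;
        int status;
};

/* Sketch of the ownership check done above: a completion may only be
 * consumed once the adapter has handed the mailbox back to the host;
 * anything else is treated as a stray interrupt. */
static int mbox_complete(struct mailbox *mb)
{
        if (mb->owner != OWN_HOST)
                return -1;
        printf("cmd 0x%x completed, status 0x%x\n", mb->command, mb->status);
        return 0;
}

int main(void)
{
        struct mailbox mb = { .owner = OWN_CHIP, .command = 0x22 };

        if (mbox_complete(&mb))
                printf("stray mailbox interrupt\n");
        mb.owner = OWN_HOST;            /* adapter finished and handed it back */
        mbox_complete(&mb);
        return 0;
}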
......
......@@ -44,6 +44,7 @@ struct lpfc_iocbq {
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
uint8_t abort_count;
uint8_t rsvd2;
......@@ -58,6 +59,8 @@ struct lpfc_iocbq {
struct lpfcMboxq *mbox;
} context_un;
void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
......@@ -173,7 +176,7 @@ struct lpfc_sli_ring {
/* Structure used for configuring rings to a specific profile or rctl / type */
struct lpfc_hbq_init {
uint32_t rn; /* Receive buffer notification */
uint32_t entry_count; /* # of entries in HBQ */
uint32_t entry_count; /* max # of entries in HBQ */
uint32_t headerLen; /* 0 if not profile 4 or 5 */
uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
uint32_t profile; /* Selection profile 0=all, 7=logentry */
......@@ -188,6 +191,11 @@ struct lpfc_hbq_init {
uint32_t cmdmatch[8];
uint32_t mask_count; /* number of mask entries in prt array */
struct hbq_mask hbqMasks[6];
/* Non-config rings fields to keep track of buffer allocations */
uint32_t buffer_count; /* number of buffers allocated */
uint32_t init_count; /* number to allocate when initialized */
uint32_t add_count; /* number to allocate when starved */
} ;
#define LPFC_MAX_HBQ 16
......@@ -238,6 +246,7 @@ struct lpfc_sli {
uint16_t mboxq_cnt; /* current length of queue */
uint16_t mboxq_max; /* max length */
LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
struct list_head mboxq_cmpl;
struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
cmd */
......@@ -250,12 +259,6 @@ struct lpfc_sli {
struct lpfc_lnk_stat lnk_stat_offsets;
};
/* Given a pointer to the start of the ring, and the slot number of
* the desired iocb entry, calc a pointer to that entry.
* (assume iocb entry size is 32 bytes, or 8 words)
*/
#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
......
......@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.12_sli3"
#define LPFC_DRIVER_VERSION "8.2.0"
#define LPFC_DRIVER_NAME "lpfc"
......
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state)
{
struct fc_vport *fc_vport = vport->fc_vport;
if (fc_vport) {
/*
* When the transport defines fc_vport_set_state() we will replace
* this code with the following line
*/
/* fc_vport_set_state(fc_vport, new_state); */
if (new_state != FC_VPORT_INITIALIZING)
fc_vport->vport_last_state = fc_vport->vport_state;
fc_vport->vport_state = new_state;
}
/* For all the error states we will set the internal state to FAILED */
switch (new_state) {
case FC_VPORT_NO_FABRIC_SUPP:
case FC_VPORT_NO_FABRIC_RSCS:
case FC_VPORT_FABRIC_LOGOUT:
case FC_VPORT_FABRIC_REJ_WWN:
case FC_VPORT_FAILED:
vport->port_state = LPFC_VPORT_FAILED;
break;
case FC_VPORT_LINKDOWN:
vport->port_state = LPFC_VPORT_UNKNOWN;
break;
default:
/* do nothing */
break;
}
}
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
int vpi;
spin_lock_irq(&phba->hbalock);
vpi = find_next_zero_bit(phba->vpi_bmask, phba->max_vpi, 1);
if (vpi > phba->max_vpi)
vpi = 0;
else
set_bit(vpi, phba->vpi_bmask);
spin_unlock_irq(&phba->hbalock);
return vpi;
}
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
spin_lock_irq(&phba->hbalock);
clear_bit(vpi, phba->vpi_bmask);
spin_unlock_irq(&phba->hbalock);
}
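/*
 * lpfc_vport_sparm: issue a READ_SPARAM mailbox command for this VPI and
 * copy the returned service parameters (including node and port names)
 * into the vport.  Returns 0 on success, -ENOMEM or -EIO on failure.
 */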
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
return -ENOMEM;
}
mb = &pmb->mb;
lpfc_read_sparam(phba, pmb, vport->vpi);
/*
* Grab buffer pointer and clear context1 so we can use
* lpfc_sli_issue_mbox_wait()
*/
mp = (struct lpfc_dmabuf *) pmb->context1;
pmb->context1 = NULL;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d (%d):1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
phba->brd_no, vport->vpi,
mb->mbxCommand, mb->mbxStatus, rc);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
return 0;
}
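/*
 * lpfc_valid_wwn_format: reject IEEE format 1 (NAA 1) names that carry
 * non-zero bits in positions 59-48.  For example (illustrative values),
 * 10:00:00:00:c9:3c:5a:01 is accepted while 1f:00:00:00:c9:3c:5a:01 is
 * logged as invalid.  Returns 1 if the name is acceptable, 0 otherwise.
 */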
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
const char *name_type)
{
/* ensure that IEEE format 1 addresses
* contain zeros in bits 59-48
*/
if (!((wwn->u.wwn[0] >> 4) == 1 &&
((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
return 1;
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x\n",
phba->brd_no, name_type,
wwn->u.wwn[0], wwn->u.wwn[1],
wwn->u.wwn[2], wwn->u.wwn[3],
wwn->u.wwn[4], wwn->u.wwn[5],
wwn->u.wwn[6], wwn->u.wwn[7]);
return 0;
}
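/*
 * lpfc_unique_wwpn: return 1 if no other port on this HBA already uses
 * the new vport's WWPN, 0 if a duplicate is found.
 */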
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
struct lpfc_vport *vport;
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport == new_vport)
continue;
/* If they match, return not unique */
if (memcmp(&vport->fc_sparam.portName,
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0)
return 0;
}
return 1;
}
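/*
 * lpfc_vport_create: fc transport entry point for NPIV vport creation.
 * Verifies SLI-3/NPIV support, allocates a VPI and driver instance,
 * creates the new Scsi_Host/port pair, reads and validates the service
 * parameters and WWNs, and then either marks the vport LINKDOWN/disabled
 * or starts the initial FDISC via the physical port's Fabric node.
 */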
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
struct lpfc_nodelist *ndlp;
struct lpfc_vport *pport =
(struct lpfc_vport *) fc_vport->shost->hostdata;
struct lpfc_hba *phba = pport->phba;
struct lpfc_vport *vport = NULL;
int instance;
int vpi;
int rc = VPORT_ERROR;
if ((phba->sli_rev < 3) ||
!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
phba->brd_no, phba->sli_rev);
rc = VPORT_INVAL;
goto error_out;
}
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1809 Create VPORT failed: "
"Max VPORTs (%d) exceeded\n",
phba->brd_no, phba->max_vpi);
rc = VPORT_NORESOURCES;
goto error_out;
}
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1810 Create VPORT failed: Cannot get "
"instance number\n", phba->brd_no);
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
goto error_out;
}
vport = lpfc_create_port(phba, instance, fc_vport);
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1811 Create VPORT failed: vpi x%x\n",
phba->brd_no, vpi);
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
goto error_out;
}
vport->vpi = vpi;
if (lpfc_vport_sparm(phba, vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1813 Create VPORT failed: vpi:%d "
"Cannot get sparam\n",
phba->brd_no, vpi);
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_NORESOURCES;
goto error_out;
}
memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
if (fc_vport->node_name != 0)
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
if (fc_vport->port_name != 0)
u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
!lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1821 Create VPORT failed: vpi:%d "
"Invalid WWN format\n",
phba->brd_no, vpi);
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_INVAL;
goto error_out;
}
if (!lpfc_unique_wwpn(phba, vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1823 Create VPORT failed: vpi:%d "
"Duplicate WWN on HBA\n",
phba->brd_no, vpi);
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_INVAL;
goto error_out;
}
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
goto out;
}
if (disable) {
rc = VPORT_OK;
goto out;
}
/* Use the physical node's Fabric NDLP to determine if the link is
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
}
rc = VPORT_OK;
out:
lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
return rc;
}
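/*
 * disable_vport: log the vport out of the fabric (NPIV LOGO), tear down
 * its nodes and RPIs, and unregister the VPI, leaving the vport in the
 * DISABLED state so it can later be re-enabled.
 */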
int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
long timeout;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (ndlp && phba->link_state >= LPFC_LINK_UP) {
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
}
lpfc_sli_host_down(vport);
/* Mark all nodes for discovery so we can remove them by
* calling lpfc_cleanup_rpis(vport, 1)
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
}
lpfc_cleanup_rpis(vport, 1);
lpfc_stop_vport_timers(vport);
lpfc_unreg_all_rpis(vport);
lpfc_unreg_default_rpis(vport);
/*
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
return VPORT_OK;
}
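/*
 * enable_vport: bring a disabled vport back.  If the link is up on a
 * fabric topology, flag the VPI for re-registration and restart discovery
 * with FDISC; otherwise mark the vport LINKDOWN.
 */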
int
enable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
return VPORT_OK;
}
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
/* Use the physical node's Fabric NDLP to determine if the link is
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
}
return VPORT_OK;
}
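/* lpfc_vport_disable: fc transport entry point that disables or enables a vport. */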
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
if (disable)
return disable_vport(fc_vport);
else
return enable_vport(fc_vport);
}
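/*
 * lpfc_vport_delete: fc transport entry point for vport removal.  Removes
 * the Scsi_Host, logs out of the fabric, flushes all remote nodes, and
 * unregisters the VPI; the final scsi_host_put() happens in the unreg_vpi
 * mailbox completion.
 */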
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *next_ndlp;
struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
long timeout;
int rc = VPORT_ERROR;
/*
* This is a bit of a mess. We want to ensure the shost doesn't get
* torn down until we're done with the embedded lpfc_vport structure.
*
* Beyond holding a reference for this function, we also need a
* reference for outstanding I/O requests we schedule during delete
* processing. But once we scsi_remove_host() we can no longer obtain
* a reference through scsi_host_get().
*
* So we take two references here. We release one reference at the
* bottom of the function -- after delinking the vport. And we
* release the other at the completion of the unreg_vpi that gets
* initiated after we've disposed of all other resources associated
* with the port.
*/
if (!scsi_host_get(shost) || !scsi_host_get(shost))
return VPORT_INVAL;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1812 vport_delete failed: Cannot delete "
"physical host\n", phba->brd_no);
goto out;
}
vport->load_flag |= FC_UNLOADING;
kfree(vport->vname);
fc_remove_host(lpfc_shost_from_vport(vport));
scsi_remove_host(lpfc_shost_from_vport(vport));
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP) {
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, allocate one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
goto skip_logo;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
} else {
lpfc_dequeue_node(vport, ndlp);
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
}
skip_logo:
lpfc_sli_host_down(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
/* free any ndlp's in unused state */
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
lpfc_drop_node(vport, ndlp);
}
lpfc_stop_vport_timers(vport);
lpfc_unreg_all_rpis(vport);
lpfc_unreg_default_rpis(vport);
/*
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
rc = VPORT_OK;
out:
scsi_host_put(shost);
return rc;
}
EXPORT_SYMBOL(lpfc_vport_create);
EXPORT_SYMBOL(lpfc_vport_delete);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#ifndef _H_LPFC_VPORT
#define _H_LPFC_VPORT
/* API version values (each will be an individual bit) */
#define VPORT_API_VERSION_1 0x01
/* Values returned via lpfc_vport_getinfo() */
struct vport_info {
uint32_t api_versions;
uint8_t linktype;
#define VPORT_TYPE_PHYSICAL 0
#define VPORT_TYPE_VIRTUAL 1
uint8_t state;
#define VPORT_STATE_OFFLINE 0
#define VPORT_STATE_ACTIVE 1
#define VPORT_STATE_FAILED 2
uint8_t fail_reason;
uint8_t prev_fail_reason;
#define VPORT_FAIL_UNKNOWN 0
#define VPORT_FAIL_LINKDOWN 1
#define VPORT_FAIL_FAB_UNSUPPORTED 2
#define VPORT_FAIL_FAB_NORESOURCES 3
#define VPORT_FAIL_FAB_LOGOUT 4
#define VPORT_FAIL_ADAP_NORESOURCES 5
uint8_t node_name[8]; /* WWNN */
uint8_t port_name[8]; /* WWPN */
struct Scsi_Host *shost;
/* Following values are valid only on physical links */
uint32_t vports_max;
uint32_t vports_inuse;
uint32_t rpi_max;
uint32_t rpi_inuse;
#define VPORT_CNT_INVALID 0xFFFFFFFF
};
/* data used in link creation */
struct vport_data {
uint32_t api_version;
uint32_t options;
#define VPORT_OPT_AUTORETRY 0x01
uint8_t node_name[8]; /* WWNN */
uint8_t port_name[8]; /* WWPN */
/*
* Upon successful creation, vport_shost will point to the new Scsi_Host
* structure for the new virtual link.
*/
struct Scsi_Host *vport_shost;
};
/* API function return codes */
#define VPORT_OK 0
#define VPORT_ERROR -1
#define VPORT_INVAL -2
#define VPORT_NOMEM -3
#define VPORT_NORESOURCES -4
int lpfc_vport_create(struct fc_vport *, bool);
int lpfc_vport_delete(struct fc_vport *);
int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
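/*
 * Illustrative wiring (a sketch, not part of this header): the vport entry
 * points above are intended to be plugged into the FC transport's function
 * template, for example:
 *
 *	static struct fc_function_template lpfc_transport_functions = {
 *		...
 *		.vport_create  = lpfc_vport_create,
 *		.vport_delete  = lpfc_vport_delete,
 *		.vport_disable = lpfc_vport_disable,
 *	};
 */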
/*
* queuecommand VPORT-specific return codes. Specified in the host byte code.
* Returned when the virtual link has failed or is not active.
*/
#define DID_VPORT_ERROR 0x0f
#define VPORT_INFO 0x1
#define VPORT_CREATE 0x2
#define VPORT_DELETE 0x4
struct vport_cmd_tag {
uint32_t cmd;
struct vport_data cdata;
struct vport_info cinfo;
void *vport;
int vport_num;
};
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
#endif /* _H_LPFC_VPORT */