Commit dbec4c90 authored by Suganath Prabu Subramani, committed by Martin K. Petersen

scsi: mpt3sas: lockless command submission

Performance improvement using the block layer tag.

Currently the driver gets the scsiio tracker and a free smid from a
linked list and array-based tracking managed by the driver. Accessing
this list in the main I/O path is a performance penalty because it is
protected by the spinlock "scsi_lookup_lock".

In this patch:

1. The driver removes all linked list accesses from the main I/O path
   and uses scmd->request->tag to get a free smid.

2. Instead of holding 'struct scsiio_tracker' in its own pool, the
   driver embeds it in the scsi command.

The driver provides cmd_size in scsi_host_template so that struct
scsiio_tracker is preallocated by the SCSI midlayer for each scsi
command. A sketch of the resulting lookup pattern is shown below.
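
The sketch below is illustrative only, not code from the patch: the
example_* helpers and the stripped-down tracker are hypothetical
stand-ins, while scsi_cmd_priv(), scsi_host_find_tag(),
scmd->request->tag and the cmd_size template field are the existing
midlayer facilities the change relies on.

/*
 * Sketch: with .cmd_size set in the scsi_host_template, the midlayer
 * preallocates the tracker behind each scsi_cmnd, so both directions
 * of the lookup need no driver-side list and no spinlock.
 */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

struct example_tracker {	/* simplified stand-in for scsiio_tracker */
	u16 smid;
	u8 cb_idx;
};

/* forward direction: block layer tag (0-based) -> smid (1-based) */
static u16 example_get_smid(struct scsi_cmnd *scmd, u8 cb_idx)
{
	struct example_tracker *st = scsi_cmd_priv(scmd); /* midlayer-allocated */

	st->cb_idx = cb_idx;
	st->smid = scmd->request->tag + 1;
	return st->smid;
}

/* reverse direction on completion: smid -> scmd -> embedded tracker */
static struct example_tracker *
example_get_tracker(struct Scsi_Host *shost, u16 smid)
{
	struct scsi_cmnd *scmd = scsi_host_find_tag(shost, smid - 1);

	return scmd ? scsi_cmd_priv(scmd) : NULL;
}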
Signed-off-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 272e253c
......@@ -889,12 +889,19 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
}
struct scsiio_tracker *
mpt3sas_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *cmd;
if (WARN_ON(!smid) ||
WARN_ON(smid >= ioc->hi_priority_smid))
return NULL;
return &ioc->scsi_lookup[smid - 1];
cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (cmd)
return scsi_cmd_priv(cmd);
return NULL;
}
/**
......@@ -915,7 +922,7 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
struct scsiio_tracker *st;
if (smid < ctl_smid) {
st = mpt3sas_get_st_from_smid(ioc, smid);
st = _get_st_from_smid(ioc, smid);
if (st)
cb_idx = st->cb_idx;
} else if (smid == ctl_smid)
......@@ -1302,15 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
* @smid: smid associated to an IO request
* @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
struct scsiio_tracker *st;
struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
......@@ -1323,9 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
st = mpt3sas_get_st_from_smid(ioc, smid);
if (st)
list_add_tail(&chain_req->tracker_list, &st->chain_list);
list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
......@@ -1940,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
......@@ -1980,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
......@@ -2083,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
......@@ -2114,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
......@@ -2759,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
......@@ -2772,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
......@@ -2839,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
unsigned long flags;
struct scsiio_tracker *request;
struct scsiio_tracker *request = scsi_cmd_priv(scmd);
unsigned int tag = scmd->request->tag;
u16 smid;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
pr_err(MPT3SAS_FMT "%s: smid not available\n",
ioc->name, __func__);
return 0;
}
request = list_entry(ioc->free_list.next,
struct scsiio_tracker, tracker_list);
request->scmd = scmd;
smid = tag + 1;
request->cb_idx = cb_idx;
smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
list_del(&request->tracker_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
request->smid = smid;
INIT_LIST_HEAD(&request->chain_list);
return smid;
}
......@@ -2904,6 +2899,22 @@ _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
}
}
void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
struct scsiio_tracker *st)
{
if (WARN_ON(st->smid == 0))
return;
st->cb_idx = 0xFF;
st->direct_io = 0;
if (!list_empty(&st->chain_list)) {
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
list_splice_init(&st->chain_list, &ioc->free_chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
}
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
......@@ -2917,23 +2928,21 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
unsigned long flags;
int i;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
/* scsiio queue */
i = smid - 1;
list_splice_init(&ioc->scsi_lookup[i].chain_list,
&ioc->free_chain_list);
ioc->scsi_lookup[i].cb_idx = 0xFF;
ioc->scsi_lookup[i].scmd = NULL;
ioc->scsi_lookup[i].direct_io = 0;
if (i < ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT)
list_add(&ioc->scsi_lookup[i].tracker_list,
&ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
struct scsiio_tracker *st;
st = _get_st_from_smid(ioc, smid);
if (!st) {
_base_recovery_check(ioc);
return;
}
mpt3sas_base_clear_st(ioc, st);
_base_recovery_check(ioc);
return;
} else if (smid < ioc->internal_smid) {
}
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
......@@ -3806,10 +3815,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
dma_pool_free(ioc->pcie_sgl_dma_pool,
ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
dma_pool_free(ioc->pcie_sgl_dma_pool,
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
......@@ -3823,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
if (ioc->scsi_lookup) {
free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
ioc->scsi_lookup = NULL;
}
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
......@@ -4127,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
ioc->scsi_lookup_pages = get_order(sz);
ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
ioc->name, (int)sz);
goto out;
}
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
......@@ -4219,6 +4213,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->pcie_sg_lookup) {
pr_info(MPT3SAS_FMT
"PCIe SGL lookup: kzalloc failed\n", ioc->name);
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
......@@ -4229,11 +4230,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
dma_pool_alloc(ioc->pcie_sgl_dma_pool,
GFP_KERNEL,
&ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
"PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
......@@ -5783,20 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
/* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
INIT_LIST_HEAD(&ioc->free_list);
smid = 1;
for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
ioc->scsi_lookup[i].cb_idx = 0xFF;
ioc->scsi_lookup[i].smid = smid;
ioc->scsi_lookup[i].scmd = NULL;
ioc->scsi_lookup[i].direct_io = 0;
if (i < ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT)
list_add_tail(&ioc->scsi_lookup[i].tracker_list,
&ioc->free_list);
}
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
......
......@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
* @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
* @chain_list: list of associated firmware chain tracker
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
struct list_head tracker_list;
u16 msix_io;
};
......@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
struct scsiio_tracker *scsi_lookup;
ulong scsi_lookup_pages;
struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
......@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
......@@ -1401,9 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd);
struct scsiio_tracker *mpt3sas_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
struct scsi_cmnd *scmd);
void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
......@@ -1439,6 +1435,8 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
......@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
u8
mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void
mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid);
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
......
......@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
u16 i;
u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
......@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
for (i = ioc->scsiio_depth; i && !found; i--) {
scmd = ioc->scsi_lookup[i - 1].scmd;
if (scmd == NULL || scmd->device == NULL ||
scmd->device->hostdata == NULL)
for (smid = ioc->scsiio_depth; smid && !found; smid--) {
struct scsiio_tracker *st;
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
......@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
st = scsi_cmd_priv(scmd);
tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
......
......@@ -1445,74 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
* __scsih_scsi_lookup_get_clear - returns scmd entry without
* holding any lock.
* mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the smid stored scmd pointer.
* Then will dereference the stored scmd pointer.
*/
static inline struct scsi_cmnd *
__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
u16 smid)
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
struct scsiio_tracker *st;
swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
return scmd;
}
/**
* _scsih_scsi_lookup_get_clear - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the smid stored scmd pointer.
* Then will derefrence the stored scmd pointer.
*/
static inline struct scsi_cmnd *
_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
struct scsi_cmnd *scmd;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return scmd;
}
/**
* _scsih_scsi_lookup_find_by_scmd - scmd lookup
* @ioc: per adapter object
* @smid: system request message index
* @scmd: pointer to scsi command object
* Context: This function will acquire ioc->scsi_lookup_lock.
*
* This will search for a scmd pointer in the scsi_lookup array,
* returning the revelent smid. A returned value of zero means invalid.
*/
struct scsiio_tracker *
_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
*scmd)
{
struct scsiio_tracker *st = NULL;
unsigned long flags;
int i;
if (smid > 0 &&
smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
u32 unique_tag = smid - 1;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
for (i = 0; i < ioc->scsiio_depth; i++) {
if (ioc->scsi_lookup[i].scmd == scmd) {
st = &ioc->scsi_lookup[i];
goto out;
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
if (st->cb_idx == 0xFF)
scmd = NULL;
}
}
out:
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return st;
return scmd;
}
/**
......@@ -2869,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsiio_tracker *st = NULL;
struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
......@@ -2887,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
/* search for the command */
st = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
if (!st) {
/* check for completed command */
if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
......@@ -2911,7 +2867,7 @@ scsih_abort(struct scsi_cmnd *scmd)
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
st->smid, st->msix_io, 30);
/* Command must be cleared after abort */
if (r == SUCCESS && st->scmd)
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
......@@ -4484,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
struct scsiio_tracker *st;
u16 smid;
u16 count = 0;
int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
st = scsi_cmd_priv(scmd);
mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
......@@ -4748,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
raid_device, mpi_request, smid);
raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
......@@ -5216,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
......@@ -5223,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
ioc->got_task_abort_from_ioctl)
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
else
scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
......@@ -5257,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
st = scsi_cmd_priv(scmd);
if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->scsi_lookup[smid - 1].scmd = scmd;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
mpt3sas_scsi_direct_io_set(ioc, smid, 0);
st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
......@@ -5441,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
return 1;
return 0;
}
/**
......@@ -7428,10 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
st = &ioc->scsi_lookup[smid - 1];
scmd = st->scmd;
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
......@@ -7454,7 +7405,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
......@@ -7495,9 +7446,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out_no_lock;
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
st->msix_io, 30);
if (r == FAILED || st->scmd) {
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
......@@ -10303,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
......@@ -10341,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 3.0 HBA devices */
......
......@@ -260,35 +260,6 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
return;
}
/**
* mpt3sas_scsi_direct_io_get - returns direct io flag
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the smid stored scmd pointer.
*/
inline u8
mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsiio_tracker *st = mpt3sas_get_st_from_smid(ioc, smid);
return st ? st->direct_io : 0;
}
/**
* mpt3sas_scsi_direct_io_set - sets direct io flag
* @ioc: per adapter object
* @smid: system request message index
* @direct_io: Zero or non-zero value to set in the direct_io flag
*
* Returns Nothing.
*/
inline void
mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
{
ioc->scsi_lookup[smid - 1].direct_io = direct_io;
}
/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
......@@ -301,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid)
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
......@@ -342,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
mpt3sas_scsi_direct_io_set(ioc, smid, 1);
st->direct_io = 1;
}