Commit f0846c68 authored by Jeff Skirvin, committed by Dan Williams

isci: Cleaning up task execute path.

Made sure the device ready check accounts for all states.
Moved the aborted-task check into the loop that pulls task requests
off the submitted list.
Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Jacek Danecki <Jacek.Danecki@intel.com>
[remove host and device starting state checks]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 1fad9e93
...@@ -83,21 +83,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -83,21 +83,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
unsigned long flags; unsigned long flags;
int ret; int ret;
enum sci_status status; enum sci_status status;
enum isci_status device_status;
dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num); dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
isci_task_complete_for_upper_layer(
task,
SAS_TASK_UNDELIVERED,
SAM_STAT_TASK_ABORTED,
isci_perform_normal_io_completion
);
return 0; /* The I/O was accepted (and failed). */
}
if ((task->dev == NULL) || (task->dev->port == NULL)) { if ((task->dev == NULL) || (task->dev->port == NULL)) {
/* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
...@@ -143,93 +132,105 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -143,93 +132,105 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
/* We don't have a valid host reference, so we /* We don't have a valid host reference, so we
* can't control the host queueing condition. * can't control the host queueing condition.
*/ */
continue; goto next_task;
} }
device = isci_dev_from_domain_dev(task->dev); device = isci_dev_from_domain_dev(task->dev);
isci_host = isci_host_from_sas_ha(task->dev->port->ha); isci_host = isci_host_from_sas_ha(task->dev->port->ha);
if (device && device->status == isci_ready) { if (device)
device_status = device->status;
else
device_status = isci_freed;
/* From this point onward, any process that needs to guarantee
* that there is no kernel I/O being started will have to wait
* for the quiesce spinlock.
*/
if (device_status != isci_ready_for_io) {
/* Forces a retry from scsi mid layer. */ /* Forces a retry from scsi mid layer. */
dev_warn(task->dev->port->ha->dev, dev_warn(task->dev->port->ha->dev,
"%s: task %p: isci_host->status = %d, " "%s: task %p: isci_host->status = %d, "
"device = %p\n", "device = %p; device_status = 0x%x\n\n",
__func__, __func__,
task, task,
isci_host_get_state(isci_host), isci_host_get_state(isci_host),
device); device, device_status);
if (device)
dev_dbg(task->dev->port->ha->dev,
"%s: device->status = 0x%x\n",
__func__, device->status);
/* Indicate QUEUE_FULL so that the scsi midlayer if (device_status == isci_ready) {
* retries. /* Indicate QUEUE_FULL so that the scsi midlayer
*/ * retries.
isci_task_complete_for_upper_layer( */
task, isci_task_complete_for_upper_layer(
SAS_TASK_COMPLETE, task,
SAS_QUEUE_FULL, SAS_TASK_COMPLETE,
isci_perform_normal_io_completion SAS_QUEUE_FULL,
); isci_perform_normal_io_completion
);
} else {
/* Else, the device is going down. */
isci_task_complete_for_upper_layer(
task,
SAS_TASK_UNDELIVERED,
SAS_DEVICE_UNKNOWN,
isci_perform_normal_io_completion
);
}
isci_host_can_dequeue(isci_host, 1); isci_host_can_dequeue(isci_host, 1);
} } else {
/* the device is going down... */ /* There is a device and it's ready for I/O. */
else if (!device || device->status != isci_ready_for_io) { spin_lock_irqsave(&task->task_state_lock, flags);
dev_dbg(task->dev->port->ha->dev, if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
"%s: task %p: isci_host->status = %d, "
"device = %p\n",
__func__,
task,
isci_host_get_state(isci_host),
device);
if (device) spin_unlock_irqrestore(&task->task_state_lock,
dev_dbg(task->dev->port->ha->dev, flags);
"%s: device->status = 0x%x\n",
__func__, device->status);
/* Indicate SAS_TASK_UNDELIVERED, so that the scsi isci_task_complete_for_upper_layer(
* midlayer removes the target. task,
*/ SAS_TASK_UNDELIVERED,
isci_task_complete_for_upper_layer( SAM_STAT_TASK_ABORTED,
task, isci_perform_normal_io_completion
SAS_TASK_UNDELIVERED, );
SAS_DEVICE_UNKNOWN,
isci_perform_normal_io_completion
);
isci_host_can_dequeue(isci_host, 1);
} else { /* The I/O was aborted. */
/* build and send the request. */
status = isci_request_execute(isci_host, task, &request,
gfp_flags);
if (status == SCI_SUCCESS) { } else {
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
} else {
/* Indicate QUEUE_FULL so that the scsi /* build and send the request. */
* midlayer retries. if the request status = isci_request_execute(isci_host, task, &request,
* failed for remote device reasons, gfp_flags);
* it gets returned as
* SAS_TASK_UNDELIVERED next time if (status != SCI_SUCCESS) {
* through.
*/ spin_lock_irqsave(&task->task_state_lock, flags);
isci_task_complete_for_upper_layer( /* Did not really start this command. */
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* Indicate QUEUE_FULL so that the scsi
* midlayer retries. if the request
* failed for remote device reasons,
* it gets returned as
* SAS_TASK_UNDELIVERED next time
* through.
*/
isci_task_complete_for_upper_layer(
task, task,
SAS_TASK_COMPLETE, SAS_TASK_COMPLETE,
SAS_QUEUE_FULL, SAS_QUEUE_FULL,
isci_perform_normal_io_completion isci_perform_normal_io_completion
); );
isci_host_can_dequeue(isci_host, 1); isci_host_can_dequeue(isci_host, 1);
}
} }
} }
next_task:
task = list_entry(task->list.next, struct sas_task, list); task = list_entry(task->list.next, struct sas_task, list);
} while (--num > 0); } while (--num > 0);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment