Commit 4794bf02 authored by Christoph Hellwig

[PATCH] turn scsi_allocate_device into readable code

parent 1b29b8d4
......@@ -1601,7 +1601,7 @@ return -ENOTSUPP;
scsi_cdb[0] = RELEASE;
// allocate with wait = true, interruptible = false
SCpnt = scsi_allocate_device(ScsiDev, 1, 0);
SCpnt = scsi_allocate_device(ScsiDev, 1);
{
CPQFC_DECLARE_COMPLETION(wait);
......
......@@ -4599,7 +4599,7 @@ static void gdth_flush(int hanum)
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
scp = scsi_allocate_device(sdev, 1, FALSE);
scp = scsi_allocate_device(sdev, 1);
scp->cmd_len = 12;
scp->use_sg = 0;
#else
......@@ -4673,7 +4673,7 @@ void gdth_halt(void)
memset(cmnd, 0xff, MAX_COMMAND_SIZE);
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
scp = scsi_allocate_device(sdev, 1, FALSE);
scp = scsi_allocate_device(sdev, 1);
scp->cmd_len = 12;
scp->use_sg = 0;
#else
......
......@@ -48,7 +48,7 @@ static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
scp = scsi_allocate_device(sdev, 1, FALSE);
scp = scsi_allocate_device(sdev, 1);
if (!scp)
return -ENOMEM;
scp->cmd_len = 12;
......@@ -712,7 +712,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
scp = scsi_allocate_device(sdev, 1, FALSE);
scp = scsi_allocate_device(sdev, 1);
if (!scp)
return -ENOMEM;
scp->cmd_len = 12;
......
......@@ -346,6 +346,41 @@ void scsi_release_request(Scsi_Request * req)
kfree(req);
}
/*
* FIXME(eric) - this is not at all optimal. Given that
* single lun devices are rare and usually slow
* (i.e. CD changers), this is good enough for now, but
* we may want to come back and optimize this later.
*
* Scan through all of the devices attached to this
* host, and see if any are active or not. If so,
* we need to defer this command.
*
* We really need a busy counter per device. This would
* allow us to more easily figure out whether we should
* do anything here or not.
*/
static int check_all_luns(struct Scsi_Host *shost, struct scsi_device *myself)
{
struct scsi_device *sdev;
for (sdev = shost->host_queue; sdev; sdev = sdev->next) {
/*
* Only look for other devices on the same bus
* with the same target ID.
*/
if (sdev->channel != myself->channel || sdev->id != myself->id)
continue;
if (sdev == myself)
continue;
if (atomic_read(&sdev->device_active))
return 1;
}
return 0;
}
/*
* Function: scsi_allocate_device
*
......@@ -372,89 +407,30 @@ void scsi_release_request(Scsi_Request * req)
* This function is deprecated, and drivers should be
* rewritten to use Scsi_Request instead of Scsi_Cmnd.
*/
/*
 * NOTE(review): this region is a unified-diff hunk whose +/- gutter
 * markers were lost in extraction, so the pre-patch ("old") and
 * post-patch ("new") versions of scsi_allocate_device() appear
 * interleaved below.  The duplicated signatures and locals, and the
 * hunk header "@@ -372,89 +407,30 @@" above, support this reading —
 * do not attempt to compile this span as-is.
 */
/* old signature (removed): took a third 'interruptable' flag */
Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
int interruptable)
/* new signature (added): interruptible waiting support was dropped */
struct scsi_cmnd *scsi_allocate_device(struct scsi_device *sdev, int wait)
{
/* old locals (Scsi_Cmnd/Scsi_Device spellings) */
struct Scsi_Host *host;
Scsi_Cmnd *SCpnt = NULL;
Scsi_Device *SDpnt;
/* new locals: wait-queue entry declared up front, fewer temporaries */
DECLARE_WAITQUEUE(wq, current);
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *scmnd;
unsigned long flags;
/* old body: defensive NULL check, then deep nesting under the lock */
if (!device)
panic("No device passed to scsi_allocate_device().\n");
host = device->host;
spin_lock_irqsave(&device_request_lock, flags);
while (1 == 1) {
SCpnt = NULL;
if (!device->device_blocked) {
if (device->single_lun) {
/*
 * FIXME(eric) - this is not at all optimal. Given that
 * single lun devices are rare and usually slow
 * (i.e. CD changers), this is good enough for now, but
 * we may want to come back and optimize this later.
 *
 * Scan through all of the devices attached to this
 * host, and see if any are active or not. If so,
 * we need to defer this command.
 *
 * We really need a busy counter per device. This would
 * allow us to more easily figure out whether we should
 * do anything here or not.
 */
/* old inline sibling-LUN scan (the patch factors this into check_all_luns) */
for (SDpnt = host->host_queue;
SDpnt;
SDpnt = SDpnt->next) {
/*
 * Only look for other devices on the same bus
 * with the same target ID.
 */
if (SDpnt->channel != device->channel
|| SDpnt->id != device->id
|| SDpnt == device) {
continue;
}
if( atomic_read(&SDpnt->device_active) != 0)
{
break;
}
}
if (SDpnt) {
/*
 * Some other device in this cluster is busy.
 * If asked to wait, we need to wait, otherwise
 * return NULL.
 */
SCpnt = NULL;
/* new retry loop: a blocked device simply defers via the 'busy' label */
while (1) {
if (sdev->device_blocked)
goto busy;
}
}
/* new: single-LUN contention check delegated to the helper */
if (sdev->single_lun && check_all_luns(shost, sdev))
goto busy;
/*
 * Now we can check for a free command block for this device.
 */
/* old free-command scan (SCpnt naming) */
for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
if (SCpnt->request == NULL)
break;
}
}
/*
 * If we couldn't find a free command block, and we have been
 * asked to wait, then do so.
 */
if (SCpnt) {
break;
}
/* old 'busy:' label and wait handling */
busy:
/*
 * If we have been asked to wait for a free block, then
 * wait here.
 */
if (wait) {
DECLARE_WAITQUEUE(wait, current);
/* new free-command scan (scmnd naming); success jumps to 'found' */
for (scmnd = sdev->device_queue; scmnd; scmnd = scmnd->next)
if (!scmnd->request)
goto found;
/* new 'busy:' label: non-waiting callers fail immediately */
busy:
if (!wait)
goto fail;
/*
 * We need to wait for a free commandblock. We need to
/* NOTE(review): embedded hunk header below — more of this comment was elided by the diff view */
......@@ -464,80 +440,54 @@ Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
 * to schedule() wouldn't block (well, it might switch,
 * but the current task will still be schedulable.
 */
/* old: interruptible-vs-uninterruptible sleep selection */
add_wait_queue(&device->scpnt_wait, &wait);
if( interruptable ) {
set_current_state(TASK_INTERRUPTIBLE);
} else {
/* new: always an uninterruptible sleep on the device's wait queue */
add_wait_queue(&sdev->scpnt_wait, &wq);
set_current_state(TASK_UNINTERRUPTIBLE);
}
spin_unlock_irqrestore(&device_request_lock, flags);
/*
 * This should block until a device command block
 * becomes available.
 */
schedule();
spin_lock_irqsave(&device_request_lock, flags);
remove_wait_queue(&device->scpnt_wait, &wait);
/*
 * FIXME - Isn't this redundant?? Someone
 * else will have forced the state back to running.
 */
remove_wait_queue(&sdev->scpnt_wait, &wq);
set_current_state(TASK_RUNNING);
/*
 * In the event that a signal has arrived that we need
 * to consider, then simply return NULL. Everyone
 * that calls us should be prepared for this
 * possibility, and pass the appropriate code back
 * to the user.
 */
/* old: signal handling for the removed interruptible mode */
if( interruptable ) {
if (signal_pending(current)) {
spin_unlock_irqrestore(&device_request_lock, flags);
return NULL;
}
}
} else {
spin_unlock_irqrestore(&device_request_lock, flags);
return NULL;
}
}
/* old success path: claim and reinitialize the command (SCpnt naming) */
SCpnt->request = NULL;
atomic_inc(&SCpnt->host->host_active);
atomic_inc(&SCpnt->device->device_active);
/* new 'found:' path: same reinitialization, scmnd naming */
found:
scmnd->request = NULL;
atomic_inc(&scmnd->host->host_active);
atomic_inc(&scmnd->device->device_active);
SCpnt->buffer = NULL;
SCpnt->bufflen = 0;
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
scmnd->buffer = NULL;
scmnd->bufflen = 0;
scmnd->request_buffer = NULL;
scmnd->request_bufflen = 0;
SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
SCpnt->old_use_sg = 0;
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
scmnd->use_sg = 0; /* Reset the scatter-gather flag */
scmnd->old_use_sg = 0;
scmnd->transfersize = 0; /* No default transfer size */
scmnd->cmd_len = 0;
SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
SCpnt->sc_request = NULL;
SCpnt->sc_magic = SCSI_CMND_MAGIC;
scmnd->sc_data_direction = SCSI_DATA_UNKNOWN;
scmnd->sc_request = NULL;
scmnd->sc_magic = SCSI_CMND_MAGIC;
SCpnt->result = 0;
SCpnt->underflow = 0; /* Do not flag underflow conditions */
SCpnt->old_underflow = 0;
SCpnt->resid = 0;
SCpnt->state = SCSI_STATE_INITIALIZING;
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
scmnd->result = 0;
scmnd->underflow = 0; /* Do not flag underflow conditions */
scmnd->old_underflow = 0;
scmnd->resid = 0;
scmnd->state = SCSI_STATE_INITIALIZING;
scmnd->owner = SCSI_OWNER_HIGHLEVEL;
spin_unlock_irqrestore(&device_request_lock, flags);
SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
SCpnt->target,
atomic_read(&SCpnt->host->host_active)));
scmnd->target,
atomic_read(&scmnd->host->host_active)));
return SCpnt;
return scmnd;
/* new 'fail:' exit: drop the lock and report allocation failure */
fail:
spin_unlock_irqrestore(&device_request_lock, flags);
return NULL;
}
inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
......
......@@ -455,7 +455,7 @@ extern void scsi_slave_detach(struct scsi_device *sdev);
extern void scsi_done(Scsi_Cmnd * SCpnt);
extern void scsi_finish_command(Scsi_Cmnd *);
extern int scsi_retry_command(Scsi_Cmnd *);
extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int, int);
extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int);
extern void __scsi_release_command(Scsi_Cmnd *);
extern void scsi_release_command(Scsi_Cmnd *);
extern void scsi_do_cmd(Scsi_Cmnd *, const void *cmnd,
......
......@@ -762,8 +762,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
SRpnt = (Scsi_Request *) req->special;
if( SRpnt->sr_magic == SCSI_REQ_MAGIC ) {
SCpnt = scsi_allocate_device(SRpnt->sr_device,
FALSE, FALSE);
SCpnt = scsi_allocate_device(SRpnt->sr_device, 0);
if (!SCpnt)
return BLKPREP_DEFER;
scsi_init_cmd_from_req(SCpnt, SRpnt);
......@@ -776,7 +775,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
if (req->special) {
SCpnt = (Scsi_Cmnd *) req->special;
} else {
SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
SCpnt = scsi_allocate_device(SDpnt, 0);
}
/*
* if command allocation failure, wait a bit
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment