Commit cb609700 authored by Doug Ledford's avatar Doug Ledford Committed by Doug Ledford

scsi.c, scsi.h, scsi_syms.c, aic7xxx_old: add new function to track queue

	full events at the mid layer instead of at the low level device
	driver
parent 862d699f
......@@ -3956,6 +3956,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
unsigned char target, lun, tindex;
unsigned char queue_flag = FALSE;
char channel;
int result;
target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f);
if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
......@@ -4457,69 +4458,42 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
}
#endif
#if 0
if (queue_flag)
{
if ( p->dev_last_queue_full[tindex] !=
p->dev_active_cmds[tindex] )
{
p->dev_last_queue_full[tindex] =
p->dev_active_cmds[tindex];
p->dev_last_queue_full_count[tindex] = 0;
}
else
{
p->dev_last_queue_full_count[tindex]++;
}
if ( (p->dev_last_queue_full_count[tindex] > 14) &&
(p->dev_active_cmds[tindex] > 4) )
{
int diff, lun;
if (p->dev_active_cmds[tindex] > p->dev_lun_queue_depth[tindex])
/* We don't know what to do here, so bail. */
break;
int diff;
result = scsi_track_queue_full(cmd->device,
aic_dev->active_cmds);
if ( result < 0 )
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
printk(INFO_LEAD "Tagged Command Queueing disabled.\n",
p->host_no, CTL_OF_SCB(scb));
diff = aic_dev->max_q_depth - p->host->cmd_per_lun;
aic_dev->temp_q_depth = 1;
aic_dev->max_q_depth = 1;
}
else if ( result > 0 )
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
CTL_OF_SCB(scb), p->dev_active_cmds[tindex]);
diff = p->dev_lun_queue_depth[tindex] -
p->dev_active_cmds[tindex];
p->dev_lun_queue_depth[tindex] -= diff;
for(lun = 0; lun < p->host->max_lun; lun++)
{
if(p->Scsi_Dev[tindex][lun] != NULL)
{
p->dev_max_queue_depth[tindex] -= diff;
scsi_adjust_queue_depth(p->Scsi_Dev[tindex][lun], 1,
p->dev_lun_queue_depth[tindex]);
if(p->dev_temp_queue_depth[tindex] > p->dev_max_queue_depth[tindex])
p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
}
}
p->dev_last_queue_full[tindex] = 0;
p->dev_last_queue_full_count[tindex] = 0;
}
else if (p->dev_active_cmds[tindex] == 0)
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION)
{
printk(INFO_LEAD "QUEUE_FULL status received with 0 "
"commands active.\n", p->host_no, CTL_OF_SCB(scb));
printk(INFO_LEAD "Tagged Command Queueing disabled\n",
p->host_no, CTL_OF_SCB(scb));
}
p->dev_max_queue_depth[tindex] = 1;
p->dev_temp_queue_depth[tindex] = 1;
scb->tag_action = 0;
scb->hscb->control &= ~(MSG_ORDERED_Q_TAG|MSG_SIMPLE_Q_TAG);
}
else
{
aic_dev->flags[tindex] |= DEVICE_WAS_BUSY;
p->dev_temp_queue_depth[tindex] =
p->dev_active_cmds[tindex];
}
}
#endif
CTL_OF_SCB(scb), result);
diff = aic_dev->max_q_depth - result;
aic_dev->max_q_depth = result;
/* temp_q_depth could have been dropped to 1 for an untagged
* command that might be coming up */
if(aic_dev->temp_q_depth > result)
aic_dev->temp_q_depth = result;
}
/* We should free up the no unused SCB entries. But, that's
* a difficult thing to do because we use a direct indexed
* array, so we can't just take any entries and free them,
* we *have* to free the ones at the end of the array, and
* they very well could be in use right now, which means
* in order to do this right, we have to add a delayed
* freeing mechanism tied into the scb_free() code area.
* We'll add that later.
*/
}
break;
}
......@@ -11003,7 +10977,7 @@ aic7xxx_biosparam(struct scsi_device *sdev, struct block_device *bdev,
if(capacity > (65535 * heads * sectors))
cylinders = 65535;
else
cylinders = ((unsigned int)capacity) / (heads * sectors);
cylinders = ((unsigned int)capacity) / (unsigned int)(heads * sectors);
}
geom[0] = (int)heads;
......
......@@ -313,7 +313,7 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
p->user[tindex].options);
if(sdptr->simple_tags)
{
size += sprintf(BLS, " Tagged Command Queueing Enabled, Ordered Tags %s\n", sdptr->ordered_tags ? "Enabled" : "Disabled");
size += sprintf(BLS, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->new_queue_depth, aic_dev->max_q_depth);
}
if(aic_dev->barrier_total)
size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n",
......
......@@ -1662,6 +1662,58 @@ void scsi_adjust_queue_depth(Scsi_Device *SDpnt, int tagged, int tags)
spin_unlock_irqrestore(&device_request_lock, flags);
}
/*
* Function: scsi_track_queue_full()
*
* Purpose: This function will track successive QUEUE_FULL events on a
* specific SCSI device to determine if and when there is a
* need to adjust the queue depth on the device.
*
* Arguments: SDpnt - SCSI Device in question
* depth - Current number of outstanding SCSI commands on
* this device, not counting the one returned as
* QUEUE_FULL.
*
* Returns: 0 - No change needed
* >0 - Adjust queue depth to this new depth
* -1 - Drop back to untagged operation using host->cmd_per_lun
* as the untagged command depth
*
* Lock Status: None held on entry
*
* Notes: Low level drivers may call this at any time and we will do
* "The Right Thing." We are interrupt context safe.
*/
int scsi_track_queue_full(Scsi_Device *SDptr, int depth)
{
	/*
	 * Debounce: several QUEUE_FULLs landing inside the same 16-jiffy
	 * window are treated as one event and ignored, so a burst from a
	 * single incident cannot inflate the counter.
	 */
	if ((jiffies >> 4) == SDptr->last_queue_full_time)
		return 0;

	SDptr->last_queue_full_time = (jiffies >> 4);

	/* Count consecutive QUEUE_FULLs seen at the same outstanding depth;
	 * a different depth restarts the count at this new depth. */
	if (SDptr->last_queue_full_depth != depth) {
		SDptr->last_queue_full_depth = depth;
		SDptr->last_queue_full_count = 1;
	} else {
		SDptr->last_queue_full_count++;
	}

	/* Not enough evidence yet that the device's real limit is `depth`. */
	if (SDptr->last_queue_full_count <= 10)
		return 0;

	/* A sustained limit below 8 isn't worth tagged queueing -- fall back
	 * to untagged operation at the host's cmd_per_lun depth. */
	if (SDptr->last_queue_full_depth < 8) {
		scsi_adjust_queue_depth(SDptr, 0 /* untagged */,
					SDptr->host->cmd_per_lun);
		return -1;
	}

	/* Clamp the tagged queue depth to what the device sustained,
	 * preserving whichever tag message type the device is using. */
	scsi_adjust_queue_depth(SDptr,
				SDptr->ordered_tags ? MSG_ORDERED_TAG
						    : MSG_SIMPLE_TAG,
				depth);
	return depth;
}
/*
* scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
* devinfo vendor and model strings.
......
......@@ -466,6 +466,7 @@ extern void scsi_bottom_half_handler(void);
extern void scsi_release_commandblocks(Scsi_Device * SDpnt);
extern void scsi_build_commandblocks(Scsi_Device * SDpnt);
extern void scsi_adjust_queue_depth(Scsi_Device *, int, int);
extern int scsi_track_queue_full(Scsi_Device *, int);
extern int scsi_slave_attach(struct scsi_device *sdev);
extern void scsi_slave_detach(struct scsi_device *sdev);
extern void scsi_done(Scsi_Cmnd * SCpnt);
......@@ -592,6 +593,11 @@ struct scsi_device {
Scsi_Cmnd *current_cmnd; /* currently active command */
unsigned short current_queue_depth;/* How deep of a queue we have */
unsigned short new_queue_depth; /* How deep of a queue we want */
unsigned short last_queue_full_depth; /* These two are used by */
unsigned short last_queue_full_count; /* scsi_track_queue_full() */
unsigned long last_queue_full_time;/* don't let QUEUE_FULLs on the same
jiffie count on our counter, they
could all be from the same event. */
unsigned int id, lun, channel;
......
......@@ -68,6 +68,7 @@ EXPORT_SYMBOL(scsi_report_bus_reset);
EXPORT_SYMBOL(scsi_block_requests);
EXPORT_SYMBOL(scsi_unblock_requests);
EXPORT_SYMBOL(scsi_adjust_queue_depth);
EXPORT_SYMBOL(scsi_track_queue_full);
EXPORT_SYMBOL(scsi_get_host_dev);
EXPORT_SYMBOL(scsi_free_host_dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment