Commit 36769af8 authored by Justin T. Gibbs's avatar Justin T. Gibbs

Update Aic7xxx driver [Rev 6.2.31]

 o Clean up driver locking by making the locking semantics between
   2.4.X and 2.5.X almost identical.  Take advantage of SCSI_HAS_HOST_LOCK
   in certain RedHat kernels.
 o Clean up command line parsing.
 o Fix module unload/reload issues stemming from DV thread teardown
   and a missing deregistration of our reboot notifier (lost during
   PCI hot plug integration).
parent 80e044ad
......@@ -37,7 +37,7 @@
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#123 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#124 $
*
* $FreeBSD$
*/
......@@ -3905,7 +3905,7 @@ ahc_softc_insert(struct ahc_softc *ahc)
*/
list_ahc = TAILQ_FIRST(&ahc_tailq);
while (list_ahc != NULL
&& ahc_softc_comp(list_ahc, ahc) <= 0)
&& ahc_softc_comp(ahc, list_ahc) <= 0)
list_ahc = TAILQ_NEXT(list_ahc, links);
if (list_ahc != NULL)
TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
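The one-line change above swaps the argument order in the ahc_softc_comp() call so that ahc_softc_insert() keeps the controller list ordered from highest to lowest probe priority (ahc_softc_comp() returns > 0 when its left-hand softc outranks the right-hand one, as documented later in this patch). A minimal userspace sketch of the same insert-sorted-by-comparator pattern, using a plain singly linked list and illustrative names rather than the driver's TAILQ machinery:

#include <stdio.h>

struct softc {
        int priority;                   /* higher value probes first */
        struct softc *next;
};

/*
 * Compare "left hand" softc with "right hand" softc:
 * > 0 means lhs outranks rhs, < 0 means rhs outranks lhs, 0 means equal.
 */
static int softc_comp(const struct softc *lhs, const struct softc *rhs)
{
        return (lhs->priority - rhs->priority);
}

/*
 * Insert so that the list stays ordered from highest to lowest priority.
 * As in the corrected ahc_softc_insert(), the new element is the
 * left-hand argument of the comparison.
 */
static void softc_insert(struct softc **head, struct softc *new)
{
        struct softc **p = head;

        while (*p != NULL && softc_comp(new, *p) <= 0)
                p = &(*p)->next;
        new->next = *p;
        *p = new;
}

int main(void)
{
        struct softc a = { 1, NULL }, b = { 3, NULL }, c = { 2, NULL };
        struct softc *head = NULL;
        struct softc *s;

        softc_insert(&head, &a);
        softc_insert(&head, &b);
        softc_insert(&head, &c);
        for (s = head; s != NULL; s = s->next)
                printf("%d\n", s->priority);    /* prints 3, 2, 1 */
        return (0);
}

Passing the new element as the left-hand argument makes the walk stop at the first existing entry it outranks, which is the behaviour the fix restores.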
......
/*
* Adaptec AIC7xxx device driver for Linux.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#195 $
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#206 $
*
* Copyright (c) 1994 John Aycock
* The University of Calgary Department of Computer Science.
......@@ -488,12 +488,11 @@ MODULE_PARM_DESC(aic7xxx,
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
"\n"
" Sample /etc/modules.conf line:\n"
" Enable verbose logging\n"
" Disable EISA/VLB probing\n"
" Toggle EISA/VLB probing\n"
" Set tag depth on Controller 1/Target 2 to 10 tags\n"
" Shorten the selection timeout to 128ms\n"
"\n"
" options aic7xxx='\"verbose.no_probe.tag_info:{{}.{..10}}.seltime:1\"'\n"
" options aic7xxx 'aic7xxx=no_probe.tag_info:{{}.{..10}}.seltime:1'\n"
);
#endif
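Following the old-line-before-new-line ordering used throughout this diff, the first sample /etc/modules.conf line above is the form being removed and the second is its replacement: the option string is no longer wrapped in an extra pair of double quotes, and "verbose" is dropped from the example. A hypothetical modules.conf fragment using the new form (the same brace syntax that the reworked option parser later in this patch consumes):

# Disable EISA/VLB probing, give Controller 1 / Target 2 a depth of 10 tags,
# and shorten the selection timeout to 128ms (new-style option string)
options aic7xxx 'aic7xxx=no_probe.tag_info:{{}.{..10}}.seltime:1'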
......@@ -513,6 +512,7 @@ static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
static void ahc_linux_start_dv(struct ahc_softc *ahc);
static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
static int ahc_linux_dv_thread(void *data);
static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
static void ahc_linux_dv_transition(struct ahc_softc *ahc,
struct scsi_cmnd *cmd,
......@@ -565,9 +565,9 @@ static void ahc_linux_free_device(struct ahc_softc*,
struct ahc_linux_device*);
static void ahc_linux_run_device_queue(struct ahc_softc*,
struct ahc_linux_device*);
static void ahc_linux_setup_tag_info(char *p, char *end, char *s);
static void ahc_linux_setup_tag_info_global(char *p);
static void ahc_linux_setup_dv(char *p, char *end, char *s);
static aic_option_callback_t ahc_linux_setup_tag_info;
static aic_option_callback_t ahc_linux_setup_dv;
static int aic7xxx_setup(char *s);
static int ahc_linux_next_unit(void);
static void ahc_runq_tasklet(unsigned long data);
......@@ -620,6 +620,9 @@ ahc_schedule_completeq(struct ahc_softc *ahc, struct ahc_cmd *acmd)
}
}
/*
* Must be called with our lock held.
*/
static __inline void
ahc_schedule_runq(struct ahc_softc *ahc)
{
......@@ -666,8 +669,8 @@ ahc_linux_run_complete_queue(struct ahc_softc *ahc, struct ahc_cmd *acmd)
u_long done_flags;
int with_errors;
ahc_done_lock(ahc, &done_flags);
with_errors = 0;
ahc_done_lock(ahc, &done_flags);
while (acmd != NULL) {
Scsi_Cmnd *cmd;
......@@ -1098,7 +1101,7 @@ ahc_linux_select_queue_depth(struct Scsi_Host * host,
u_long flags;
ahc = *((struct ahc_softc **)host->hostdata);
ahc_midlayer_entrypoint_lock(ahc, &flags);
ahc_lock(ahc, &flags);
for (device = scsi_devs; device != NULL; device = device->next) {
if (device->host == host) {
struct ahc_linux_device *dev;
......@@ -1131,7 +1134,7 @@ ahc_linux_select_queue_depth(struct Scsi_Host * host,
}
}
}
ahc_midlayer_entrypoint_unlock(ahc, &flags);
ahc_unlock(ahc, &flags);
}
#endif
......@@ -1242,30 +1245,23 @@ ahc_linux_bus_reset(Scsi_Cmnd *cmd)
int found;
ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_unlock_irq(&io_request_lock);
#endif
ahc_midlayer_entrypoint_lock(ahc, &s);
found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
/*initiate reset*/TRUE);
acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
TAILQ_INIT(&ahc->platform_data->completeq);
ahc_midlayer_entrypoint_unlock(ahc, &s);
if (bootverbose)
printf("%s: SCSI bus reset delivered. "
"%d SCBs aborted.\n", ahc_name(ahc), found);
if (acmd != NULL) {
acmd = ahc_linux_run_complete_queue(ahc, acmd);
if (acmd != NULL) {
ahc_midlayer_entrypoint_lock(ahc, &s);
if (acmd != NULL)
ahc_schedule_completeq(ahc, acmd);
ahc_midlayer_entrypoint_unlock(ahc, &s);
}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_lock_irq(&io_request_lock);
#endif
ahc_midlayer_entrypoint_unlock(ahc, &s);
if (bootverbose)
printf("%s: SCSI bus reset delivered. "
"%d SCBs aborted.\n", ahc_name(ahc), found);
return SUCCESS;
}
......@@ -1317,15 +1313,25 @@ Scsi_Host_Template aic7xxx_driver_template = {
/**************************** Tasklet Handler *********************************/
/*
* In 2.4.X and above, this routine is called from a tasklet,
* so we must re-acquire our lock prior to executing this code.
* In all prior kernels, ahc_schedule_runq() calls this routine
* directly and ahc_schedule_runq() is called with our lock held.
*/
static void
ahc_runq_tasklet(unsigned long data)
{
struct ahc_softc* ahc;
struct ahc_linux_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
u_long flags;
#endif
ahc = (struct ahc_softc *)data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
ahc_lock(ahc, &flags);
#endif
while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
......@@ -1335,7 +1341,9 @@ ahc_runq_tasklet(unsigned long data)
ahc_unlock(ahc, &flags);
ahc_lock(ahc, &flags);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
ahc_unlock(ahc, &flags);
#endif
}
/************************ Shutdown/halt/reboot hook ***************************/
......@@ -1499,6 +1507,12 @@ ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
}
/********************* Platform Dependent Functions ***************************/
/*
* Compare "left hand" softc with "right hand" softc, returning:
* < 0 - lahc has a lower priority than rahc
* 0 - Softcs are equal
* > 0 - lahc has a higher priority than rahc
*/
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
......@@ -1517,7 +1531,7 @@ ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
- (rahc->flags & AHC_BIOS_ENABLED);
if (value != 0)
/* Controllers with BIOS enabled have a *higher* priority */
return (-value);
return (value);
/*
* Same BIOS setting, now sort based on bus type.
......@@ -1530,7 +1544,7 @@ ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
lvalue = (lahc->chip & AHC_BUS_MASK);
if (lvalue == AHC_VL)
lvalue = AHC_EISA;
value = lvalue - rvalue;
value = rvalue - lvalue;
if (value != 0)
return (value);
......@@ -1541,40 +1555,40 @@ ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
char primary_channel;
if (aic7xxx_reverse_scan != 0)
value = ahc_get_pci_bus(rahc->dev_softc)
- ahc_get_pci_bus(lahc->dev_softc);
else
value = ahc_get_pci_bus(lahc->dev_softc)
- ahc_get_pci_bus(rahc->dev_softc);
else
value = ahc_get_pci_bus(rahc->dev_softc)
- ahc_get_pci_bus(lahc->dev_softc);
if (value != 0)
break;
if (aic7xxx_reverse_scan != 0)
value = ahc_get_pci_slot(rahc->dev_softc)
- ahc_get_pci_slot(lahc->dev_softc);
else
value = ahc_get_pci_slot(lahc->dev_softc)
- ahc_get_pci_slot(rahc->dev_softc);
else
value = ahc_get_pci_slot(rahc->dev_softc)
- ahc_get_pci_slot(lahc->dev_softc);
if (value != 0)
break;
/*
* On multi-function devices, the user can choose
* to have function 1 probed before function 0.
* Give whichever channel is the primary channel
* the lowest priority.
* the highest priority.
*/
primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
value = 1;
if (lahc->channel == primary_channel)
value = -1;
if (lahc->channel == primary_channel)
value = 1;
break;
}
case AHC_EISA:
if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
value = lahc->platform_data->bios_address
- rahc->platform_data->bios_address;
value = rahc->platform_data->bios_address
- lahc->platform_data->bios_address;
} else {
value = lahc->bsh.ioport
- rahc->bsh.ioport;
value = rahc->bsh.ioport
- lahc->bsh.ioport;
}
break;
default:
......@@ -1583,90 +1597,6 @@ ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
return (value);
}
static void
ahc_linux_setup_tag_info(char *p, char *end, char *s)
{
char *base;
char *tok;
char *tok_end;
char *tok_end2;
int i;
int instance;
int targ;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
if (*p != ':')
return;
instance = -1;
targ = -1;
done = FALSE;
base = p;
/* Forward us just past the ':' */
tok = base + 1;
tok_end = strchr(tok, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*tok) {
case '{':
if (instance == -1)
instance = 0;
else if (targ == -1)
targ = 0;
tok++;
break;
case '}':
if (targ != -1)
targ = -1;
else if (instance != -1)
instance = -1;
tok++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (targ >= 0)
targ++;
else if (instance >= 0)
instance++;
if ((targ >= AHC_NUM_TARGETS) ||
(instance >= NUM_ELEMENTS(aic7xxx_tag_info)))
done = TRUE;
tok++;
if (!done) {
base = tok;
}
break;
case '\0':
done = TRUE;
break;
default:
done = TRUE;
tok_end = strchr(tok, '\0');
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(tok, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end)) {
tok_end = tok_end2;
done = FALSE;
}
}
if ((instance >= 0) && (targ >= 0)
&& (instance < NUM_ELEMENTS(aic7xxx_tag_info))
&& (targ < AHC_NUM_TARGETS)) {
aic7xxx_tag_info[instance].tag_commands[targ] =
simple_strtoul(tok, NULL, 0) & 0xff;
}
tok = tok_end;
break;
}
}
while ((p != base) && (p != NULL))
p = strsep(&s, ",.");
}
static void
ahc_linux_setup_tag_info_global(char *p)
{
......@@ -1683,77 +1613,28 @@ ahc_linux_setup_tag_info_global(char *p)
}
static void
ahc_linux_setup_dv(char *p, char *end, char *s)
ahc_linux_setup_tag_info(void *arg, int instance, int targ, int32_t value)
{
char *base;
char *tok;
char *tok_end;
char *tok_end2;
int i;
int instance;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
if (*p != ':')
return;
instance = -1;
done = FALSE;
base = p;
/* Forward us just past the ':' */
tok = base + 1;
tok_end = strchr(tok, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*tok) {
case '{':
if (instance == -1)
instance = 0;
tok++;
break;
case '}':
if (instance != -1)
instance = -1;
tok++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (instance >= 0)
instance++;
if (instance >= NUM_ELEMENTS(aic7xxx_dv_settings))
done = TRUE;
tok++;
if (!done) {
base = tok;
}
break;
case '\0':
done = TRUE;
break;
default:
done = TRUE;
tok_end = strchr(tok, '\0');
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(tok, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end)) {
tok_end = tok_end2;
done = FALSE;
}
if ((instance >= 0) && (targ >= 0)
&& (instance < NUM_ELEMENTS(aic7xxx_tag_info))
&& (targ < AHC_NUM_TARGETS)) {
aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
if (bootverbose)
printf("tag_info[%d:%d] = %d\n", instance, targ, value);
}
}
static void
ahc_linux_setup_dv(void *arg, int instance, int targ, int32_t value)
{
if ((instance >= 0)
&& (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
aic7xxx_dv_settings[instance] =
simple_strtol(tok, NULL, 0);
}
tok = tok_end;
break;
}
aic7xxx_dv_settings[instance] = value;
if (bootverbose)
printf("dv[%d] = %d\n", instance, value);
}
while ((p != base) && (p != NULL))
p = strsep(&s, ",.");
}
/*
......@@ -1791,31 +1672,39 @@ aic7xxx_setup(char *s)
end = strchr(s, '\0');
/*
* XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
* will never be 0 in this case.
*/
n = 0;
while ((p = strsep(&s, ",.")) != NULL) {
if (*p == '\0')
continue;
for (i = 0; i < NUM_ELEMENTS(options); i++) {
n = strlen(options[i].name);
if (strncmp(options[i].name, p, n) != 0)
n = strlen(options[i].name);
if (strncmp(options[i].name, p, n) == 0)
break;
}
if (i == NUM_ELEMENTS(options))
continue;
if (!strncmp(p, "global_tag_depth", n)) {
if (strncmp(p, "global_tag_depth", n) == 0) {
ahc_linux_setup_tag_info_global(p + n);
} else if (!strncmp(p, "tag_info", n)) {
ahc_linux_setup_tag_info(p + n, end, s);
} else if (strncmp(p, "tag_info", n) == 0) {
s = aic_parse_brace_option("tag_info", p + n, end,
2, ahc_linux_setup_tag_info, NULL);
} else if (strncmp(p, "dv", n) == 0) {
ahc_linux_setup_dv(p + n, end, s);
s = aic_parse_brace_option("dv", p + n, end, 1,
ahc_linux_setup_dv, NULL);
} else if (p[n] == ':') {
*(options[i].flag) =
simple_strtoul(p + n + 1, NULL, 0);
} else if (!strncmp(p, "verbose", n)) {
*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
} else if (strncmp(p, "verbose", n) == 0) {
*(options[i].flag) = 1;
} else {
*(options[i].flag) = ~(*(options[i].flag));
}
break;
}
}
return 1;
}
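The hand-rolled tag_info/dv tokenizers removed above are replaced by a shared brace-option parser, aic_parse_brace_option(), which calls back into ahc_linux_setup_tag_info() / ahc_linux_setup_dv() with (instance, target, value) triples. The sketch below is a simplified, self-contained illustration of that parsing idea, not the driver's aiclib implementation; the helper name parse_brace_option and its exact behaviour are assumptions for demonstration only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Callback shape matching the aic_option_callback_t uses shown above. */
typedef void option_callback_t(void *arg, int instance, int targ, int32_t value);

/*
 * Illustrative stand-in for brace-option parsing: walk a string such as
 * ":{{}.{..10}}", track up to 'depth' nested indices, and hand each numeric
 * value to the callback together with those indices.
 */
static void parse_brace_option(char *opt, int depth,
                               option_callback_t *cb, void *arg)
{
        int index[2] = { -1, -1 };      /* index[0]=instance, index[1]=target */
        int level = -1;

        if (*opt != ':')
                return;
        for (opt++; *opt != '\0'; opt++) {
                switch (*opt) {
                case '{':
                        level++;
                        if (level >= 0 && level < depth)
                                index[level] = 0;
                        break;
                case '}':
                        if (level >= 0 && level < depth)
                                index[level] = -1;
                        level--;
                        break;
                case '.':
                case ',':
                        if (level >= 0 && level < depth)
                                index[level]++;
                        break;
                default:
                        if (level < 0)
                                break;  /* values outside braces not handled */
                        cb(arg, index[0], index[1],
                           (int32_t)strtol(opt, &opt, 0));
                        opt--;          /* compensate for the loop's opt++ */
                        break;
                }
        }
}

static void show(void *arg, int instance, int targ, int32_t value)
{
        printf("instance %d, target %d, value %d\n", instance, targ, value);
}

int main(void)
{
        char opt[] = ":{{}.{..10}}";    /* controller 1, target 2 -> 10 tags */

        parse_brace_option(opt, 2, show, NULL);
        return (0);
}

Running it against the sample string from the module options, ":{{}.{..10}}", reports instance 1, target 2, value 10, matching "Set tag depth on Controller 1/Target 2 to 10 tags" in the parameter description.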
......@@ -1844,6 +1733,8 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
ahc_lock(ahc, &s);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
scsi_assign_lock(host, &ahc->platform_data->spin_lock);
#elif AHC_SCSI_HAS_HOST_LOCK != 0
host->lock = &ahc->platform_data->spin_lock;
#endif
ahc->platform_data->host = host;
host->can_queue = AHC_MAX_QUEUE;
......@@ -2072,26 +1963,10 @@ ahc_platform_free(struct ahc_softc *ahc)
{
struct ahc_linux_target *targ;
struct ahc_linux_device *dev;
u_long s;
int i, j;
if (ahc->platform_data != NULL) {
/* Kill the DV kthread */
if (ahc->platform_data->dv_pid > 0) {
ahc_lock(ahc, &s);
ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
ahc_unlock(ahc, &s);
up(&ahc->platform_data->dv_sem);
do {
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_DV) {
printf("%s: Waiting for DV thread to "
"exit\n", ahc_name(ahc));
}
#endif
} while (waitpid(ahc->platform_data->dv_pid, NULL,
__WCLONE) == -ERESTARTSYS);
}
ahc_linux_kill_dv_thread(ahc);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
tasklet_kill(&ahc->platform_data->runq_tasklet);
#endif
......@@ -2173,7 +2048,18 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
if (dev == NULL)
return;
was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
now_queuing = alg != AHC_QUEUE_NONE;
switch (alg) {
default:
case AHC_QUEUE_NONE:
now_queuing = 0;
break;
case AHC_QUEUE_BASIC:
now_queuing = AHC_DEV_Q_BASIC;
break;
case AHC_QUEUE_TAGGED:
now_queuing = AHC_DEV_Q_TAGGED;
break;
}
if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
&& (was_queuing != now_queuing)
&& (dev->active != 0)) {
......@@ -2322,24 +2208,12 @@ ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
TAILQ_INIT(&ahc->platform_data->completeq);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
if (acmd != NULL) {
acmd = ahc_linux_run_complete_queue(ahc, acmd);
if (acmd != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_lock(ahc, &flags);
#endif
if (acmd != NULL)
ahc_schedule_completeq(ahc, acmd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
static void
......@@ -2364,6 +2238,48 @@ ahc_linux_start_dv(struct ahc_softc *ahc)
}
}
static void
ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
{
u_long s;
ahc_lock(ahc, &s);
if (ahc->platform_data->dv_pid != 0) {
ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
ahc_unlock(ahc, &s);
up(&ahc->platform_data->dv_sem);
/*
* Use the eh_sem as an indicator that the
* dv thread is exiting. Note that the dv
* thread must still return after performing
* the up on our semaphore before it has
* completely exited this module. Unfortunately,
* there seems to be no easy way to wait for the
* exit of a thread for which you are not the
* parent (dv threads are parented by init).
* Cross your fingers...
*/
down(&ahc->platform_data->eh_sem);
/*
* Mark the dv thread as already dead. This
* avoids attempting to kill it a second time.
* This is necessary because we must kill the
* DV thread before calling ahc_free() in the
* module shutdown case to avoid bogus locking
in the SCSI mid-layer, but ahc_free() is
* called without killing the DV thread in the
* instance detach case, so ahc_platform_free()
* calls us again to verify that the DV thread
* is dead.
*/
ahc->platform_data->dv_pid = 0;
} else {
ahc_unlock(ahc, &s);
}
}
static int
ahc_linux_dv_thread(void *data)
{
......@@ -2406,7 +2322,7 @@ ahc_linux_dv_thread(void *data)
ahc_lock(ahc, &s);
if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
ahc_unlock(ahc, &s);
return (0);
break;
}
ahc_unlock(ahc, &s);
......@@ -2452,7 +2368,7 @@ ahc_linux_dv_thread(void *data)
*/
ahc_linux_release_simq((u_long)ahc);
}
up(&ahc->platform_data->eh_sem);
return (0);
}
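The comment in ahc_linux_kill_dv_thread() above notes that the DV kthread is parented by init, so the driver cannot waitpid() for it; instead the shutdown path sets AHC_DV_SHUTDOWN, ups dv_sem to wake the thread, and then sleeps on eh_sem, which the thread ups just before returning (visible at the end of ahc_linux_dv_thread() above). A userspace analogue of that handshake using POSIX threads and semaphores, with illustrative names only, is sketched below; build with cc -pthread.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t wake_sem;          /* stands in for dv_sem */
static sem_t exit_sem;          /* stands in for eh_sem */
static int shutdown_requested;  /* stands in for AHC_DV_SHUTDOWN */

static void *worker(void *arg)
{
        for (;;) {
                sem_wait(&wake_sem);    /* sleep until there is work */
                if (shutdown_requested)
                        break;
                /* ... perform domain validation work here ... */
        }
        sem_post(&exit_sem);            /* signal "I am about to exit" */
        return NULL;
}

int main(void)
{
        pthread_t tid;

        sem_init(&wake_sem, 0, 0);
        sem_init(&exit_sem, 0, 0);
        pthread_create(&tid, NULL, worker, NULL);

        /* Shutdown path: mirrors ahc_linux_kill_dv_thread(). */
        shutdown_requested = 1;
        sem_post(&wake_sem);            /* wake the worker */
        sem_wait(&exit_sem);            /* wait for its exit notification */

        pthread_join(tid, NULL);        /* possible here, unlike in-kernel */
        printf("worker torn down\n");
        return 0;
}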
......@@ -2599,14 +2515,19 @@ ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
/*
* In 2.5.X, it is assumed that all calls from the
* "midlayer" (which we are emulating) will have the
* ahc host lock held.
* ahc host lock held. For other kernels, the
* io_request_lock must be held.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#if AHC_SCSI_HAS_HOST_LOCK != 0
ahc_lock(ahc, &s);
#else
spin_lock_irqsave(&io_request_lock, s);
#endif
ahc_linux_queue(cmd, ahc_linux_dv_complete);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#if AHC_SCSI_HAS_HOST_LOCK != 0
ahc_unlock(ahc, &s);
#else
spin_unlock_irqrestore(&io_request_lock, s);
#endif
down_interruptible(&ahc->platform_data->dv_cmd_sem);
/*
......@@ -3488,7 +3409,6 @@ ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
{
struct ahc_softc *ahc;
struct ahc_cmd *acmd;
struct ahc_linux_device *next_dev;
struct scb *scb;
u_long flags;
......@@ -3535,36 +3455,16 @@ ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
ahc->platform_data->reset_timer.function =
(ahc_linux_callback_t *)ahc_linux_release_simq;
add_timer(&ahc->platform_data->reset_timer);
/*
* In 2.5.X, the "done lock" is the ahc_lock.
* Instead of dropping and re-acquiring the same
* lock in the 2.5.X case, just hold our ahc_lock
* the whole time. ahc_done_lock() has been
* made a no-op for 2.5.X too.
*/
acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
TAILQ_INIT(&ahc->platform_data->completeq);
next_dev = ahc_linux_next_device_to_run(ahc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
if (next_dev)
if (ahc_linux_next_device_to_run(ahc) != NULL)
ahc_schedule_runq(ahc);
if (acmd != NULL) {
acmd = ahc_linux_run_complete_queue(ahc, acmd);
if (acmd != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_lock(ahc, &flags);
#endif
if (acmd != NULL)
ahc_schedule_completeq(ahc, acmd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
static void
......@@ -3652,17 +3552,17 @@ ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
tags = 0;
if ((ahc->user_discenable & devinfo->target_mask) != 0) {
if (warned_user == 0
&& ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
printf("aic7xxx: WARNING, insufficient "
"tag_info instances for installed "
"controllers. Using defaults\n");
printf("aic7xxx: Please update the "
"aic7xxx_tag_info array in the "
"aic7xxx.c source file.\n");
tags = AHC_MAX_QUEUE;
if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
if (warned_user == 0) {
printf(KERN_WARNING
"aic7xxx: WARNING: Insufficient tag_info instances\n"
"aic7xxx: for installed controllers. Using defaults\n"
"aic7xxx: Please update the aic7xxx_tag_info array in\n"
"aic7xxx: the aic7xxx_osm..c source file.\n");
warned_user++;
}
tags = AHC_MAX_QUEUE;
} else {
adapter_tag_info_t *tag_info;
......@@ -3681,17 +3581,17 @@ ahc_linux_user_dv_setting(struct ahc_softc *ahc)
static int warned_user;
int dv;
if (warned_user == 0
&& ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
if (warned_user == 0) {
printf("aic7xxx: WARNING, insufficient "
"dv settings instances for installed "
"controllers. Using defaults\n");
printf("aic7xxx: Please update the "
"aic7xxx_dv_settings array in the "
"aic7xxx.c source file.\n");
dv = -1;
printf(KERN_WARNING
"aic7xxx: WARNING: Insufficient dv settings instances\n"
"aic7xxx: for installed controllers. Using defaults\n"
"aic7xxx: Please update the aic7xxx_dv_settings array\n"
"aic7xxx: in the aic7xxx_osm.c source file.\n");
warned_user++;
}
dv = -1;
} else {
dv = aic7xxx_dv_settings[ahc->unit];
......@@ -3969,41 +3869,20 @@ ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
struct ahc_softc *ahc;
struct ahc_cmd *acmd;
u_long flags;
struct ahc_linux_device *next_dev;
ahc = (struct ahc_softc *) dev_id;
ahc_lock(ahc, &flags);
ahc_intr(ahc);
acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
TAILQ_INIT(&ahc->platform_data->completeq);
next_dev = ahc_linux_next_device_to_run(ahc);
/*
* In 2.5.X, the "done lock" is the ahc_lock.
* Instead of dropping and re-acquiring the same
* lock in the 2.5.X case, just hold our ahc_lock
* the whole time. ahc_done_lock() has been
* made a no-op for 2.5.X too.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
if (next_dev)
if (ahc_linux_next_device_to_run(ahc) != NULL)
ahc_schedule_runq(ahc);
if (acmd != NULL) {
acmd = ahc_linux_run_complete_queue(ahc, acmd);
if (acmd != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_lock(ahc, &flags);
#endif
if (acmd != NULL)
ahc_schedule_completeq(ahc, acmd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &flags);
#endif
}
void
......@@ -4486,7 +4365,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
}
ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
ahc_set_scsi_status(scb, SCSI_STATUS_OK);
ahc_set_tags(ahc, &devinfo,
ahc_platform_set_tags(ahc, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
break;
......@@ -4497,7 +4376,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
*/
dev->openings = 1;
ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
ahc_set_tags(ahc, &devinfo,
ahc_platform_set_tags(ahc, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
/* FALLTHROUGH */
......@@ -4800,6 +4679,7 @@ ahc_linux_release_simq(u_long arg)
ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
up(&ahc->platform_data->dv_sem);
}
ahc_schedule_runq(ahc);
ahc_unlock(ahc, &s);
/*
* There is still a race here. The mid-layer
......@@ -4809,8 +4689,6 @@ ahc_linux_release_simq(u_long arg)
*/
if (unblock_reqs)
scsi_unblock_requests(ahc->platform_data->host);
ahc_schedule_runq(ahc);
}
static void
......@@ -4881,9 +4759,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
* by acquiring either the io_request_lock or our own
* lock, this *should* be safe.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_unlock_irq(&io_request_lock);
#endif
ahc_midlayer_entrypoint_lock(ahc, &s);
/*
......@@ -5140,11 +5015,7 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
int ret;
ahc->platform_data->flags |= AHC_UP_EH_SEMAPHORE;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_unlock(ahc, &s);
#else
spin_unlock_irq(ahc->platform_data->host->host_lock);
#endif
spin_unlock_irq(&ahc->platform_data->spin_lock);
init_timer(&timer);
timer.data = (u_long)ahc;
timer.expires = jiffies + (5 * HZ);
......@@ -5158,27 +5029,17 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
printf("Timer Expired\n");
retval = FAILED;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahc_lock(ahc, &s);
#else
spin_lock_irq(ahc->platform_data->host->host_lock);
#endif
spin_lock_irq(&ahc->platform_data->spin_lock);
}
acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
TAILQ_INIT(&ahc->platform_data->completeq);
ahc_midlayer_entrypoint_unlock(ahc, &s);
ahc_schedule_runq(ahc);
if (acmd != NULL) {
acmd = ahc_linux_run_complete_queue(ahc, acmd);
if (acmd != NULL) {
ahc_midlayer_entrypoint_lock(ahc, &s);
if (acmd != NULL)
ahc_schedule_completeq(ahc, acmd);
ahc_midlayer_entrypoint_unlock(ahc, &s);
}
}
ahc_schedule_runq(ahc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_lock_irq(&io_request_lock);
#endif
ahc_midlayer_entrypoint_unlock(ahc, &s);
return (retval);
}
......@@ -5241,8 +5102,22 @@ ahc_linux_init(void)
static void __exit
ahc_linux_exit(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
struct ahc_softc *ahc;
u_long l;
/*
* Shutdown DV threads before going into the SCSI mid-layer.
* This avoids situations where the mid-layer locks the entire
* kernel so that waiting for our DV threads to exit leads
* to deadlock.
*/
ahc_list_lock(&l);
TAILQ_FOREACH(ahc, &ahc_tailq, links) {
ahc_linux_kill_dv_thread(ahc);
}
ahc_list_unlock(&l);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
ahc_linux_pci_exit();
......
......@@ -53,7 +53,7 @@
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#129 $
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#131 $
*
*/
#ifndef _AIC7XXX_LINUX_H_
......@@ -299,7 +299,13 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
#include <linux/smp.h>
#endif
#define AIC7XXX_DRIVER_VERSION "6.2.30"
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHC_SCSI_HAS_HOST_LOCK 1
#else
#define AHC_SCSI_HAS_HOST_LOCK 0
#endif
#define AIC7XXX_DRIVER_VERSION "6.2.31"
/**************************** Front End Queues ********************************/
/*
......@@ -703,7 +709,6 @@ ahc_lockinit(struct ahc_softc *ahc)
static __inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
*flags = 0;
spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
}
......@@ -717,10 +722,13 @@ static __inline void
ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
{
/*
* In 2.5.X, the midlayer takes our lock just before
* calling us, so avoid locking again.
* In 2.5.X and some 2.4.X versions, the midlayer takes our
* lock just before calling us, so we avoid locking again.
* For other kernel versions, the io_request_lock is taken
* just before our entry point is called. In this case, we
* trade the io_request_lock for our per-softc lock.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#if AHC_SCSI_HAS_HOST_LOCK == 0
ahc_lock(ahc, flags);
#endif
}
......@@ -728,11 +736,7 @@ ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
static __inline void
ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
/*
* In 2.5.X, the midlayer takes our lock just before
* calling us and unlocks when we return, so let it do the unlock.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#if AHC_SCSI_HAS_HOST_LOCK == 0
ahc_unlock(ahc, flags);
#endif
}
......@@ -750,8 +754,7 @@ ahc_done_lockinit(struct ahc_softc *ahc)
static __inline void
ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
*flags = 0;
#if AHC_SCSI_HAS_HOST_LOCK == 0
spin_lock_irqsave(&io_request_lock, *flags);
#endif
}
......@@ -759,7 +762,7 @@ ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
static __inline void
ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#if AHC_SCSI_HAS_HOST_LOCK == 0
spin_unlock_irqrestore(&io_request_lock, *flags);
#endif
}
......@@ -773,7 +776,6 @@ ahc_list_lockinit()
static __inline void
ahc_list_lock(unsigned long *flags)
{
*flags = 0;
spin_lock_irqsave(&ahc_list_spinlock, *flags);
}
......@@ -793,7 +795,6 @@ ahc_lockinit(struct ahc_softc *ahc)
static __inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
*flags = 0;
save_flags(*flags);
cli();
}
......@@ -832,7 +833,6 @@ ahc_list_lockinit()
static __inline void
ahc_list_lock(unsigned long *flags)
{
*flags = 0;
save_flags(*flags);
cli();
}
......
......@@ -39,7 +39,7 @@
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#61 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#62 $
*
* $FreeBSD$
*/
......@@ -752,10 +752,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
full_id = ahc_compose_id(device,
vendor,
subdevice,
subvendor);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
* If the second function is not hooked up, ignore it.
......