Commit dcde1f6f authored by Linus Torvalds

Merge bk://linux-scsi.bkbits.net/scsi-for-linus-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 68da1c2a 61561538
......@@ -149,19 +149,8 @@ config SCSI_MULTI_LUN
can say Y here to force the SCSI driver to probe for multiple LUNs.
A SCSI device with multiple LUNs acts logically like multiple SCSI
devices. The vast majority of SCSI devices have only one LUN, and
so most people can say N here and should in fact do so, because it
is safer.
config SCSI_REPORT_LUNS
bool "Build with SCSI REPORT LUNS support"
depends on SCSI
default y
help
If you want support for SCSI REPORT LUNS, say Y here.
The REPORT LUNS command is useful for devices (such as disk arrays)
with large numbers of LUNs where the LUN values are not contiguous
(sparse LUN). REPORT LUNS scanning is done only for SCSI-3 devices.
Most users can safely answer N here.
so most people can say N here. The max_luns boot/module parameter
can be used to override this setting.
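As an aside, max_luns is an ordinary scsi_mod module parameter (its
module_param_named() declaration appears later in this same patch); assuming
the usual 2.6 syntax, the override would be given as, e.g.

	scsi_mod.max_luns=8		(SCSI core built into the kernel)
	modprobe scsi_mod max_luns=8	(SCSI core built as a module)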
config SCSI_CONSTANTS
bool "Verbose SCSI error reporting (kernel size +=12K)"
......@@ -363,7 +352,7 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
# All the I2O code and drivers do not seem to be 64bit safe.
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
depends on !64BIT && SCSI && BROKEN
depends on !64BIT && SCSI
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
......
......@@ -170,7 +170,6 @@ enum Phase {
/* Static function prototypes */
static void NCR53c406a_intr(int, void *, struct pt_regs *);
static irqreturn_t do_NCR53c406a_intr(int, void *, struct pt_regs *);
static void wait_intr(void);
static void chip_init(void);
static void calc_port_addr(void);
#ifndef IRQ_LEV
......@@ -665,6 +664,7 @@ static const char *NCR53c406a_info(struct Scsi_Host *SChost)
return (info_msg);
}
#if 0
static void wait_intr(void)
{
unsigned long i = jiffies + WATCHDOG;
......@@ -684,6 +684,7 @@ static void wait_intr(void)
NCR53c406a_intr(0, NULL, NULL);
}
#endif
static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
......
......@@ -38,15 +38,19 @@ Deanna Bonds <deanna_bonds@adaptec.com> (non-DASD support, PAE fibs and 64 bit,
(fixed 64bit and 64G memory model, changed confusing naming convention
where fibs that go to the hardware are consistently called hw_fibs and
not just fibs like the name of the driver tracking structure)
Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
Original Driver
-------------------------
Adaptec Unix OEM Product Group
Mailing List
-------------------------
None currently. Also note this is very different to Brian's original driver
linux-aacraid-devel@dell.com (Interested parties troll here)
http://mbserver.adaptec.com/ (Currently more Community Support than Devel Support)
Also note this is very different to Brian's original driver
so don't expect him to support it.
Adaptec does support this driver. Contact either tech support or deanna bonds.
Adaptec does support this driver. Contact either tech support or Mark Salyzyn.
Original by Brian Boerner February 2001
Rewritten by Alan Cox, November 2001
//#define dprintk(x) printk x
#define dprintk(x)
#if (!defined(dprintk))
# define dprintk(x)
#endif
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
#define MAXIMUM_NUM_CONTAINERS 31
#define MAXIMUM_NUM_ADAPTERS 8
#define AAC_NUM_FIB 578
#define AAC_NUM_FIB 578
//#define AAC_NUM_IO_FIB 512
#define AAC_NUM_IO_FIB 100
#define AAC_NUM_IO_FIB 100
#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
#define AAC_MAX_LUN (8)
#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
#define AAC_MAX_LUN (8)
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
......@@ -240,92 +242,6 @@ enum aac_queue_types {
AdapHighRespQueue /* Host to adapter high priority response traffic */
};
/*
* Implement our own version of these so we have 64 bit compatability
* The adapter uses these and can only handle 32 bit addresses
*/
struct aac_list_head {
u32 next;
u32 prev;
};
#define AAC_INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (u32)(ulong)(ptr); \
(ptr)->prev = (u32)(ulong)(ptr); \
} while (0)
/**
* aac_list_empty - tests whether a list is empty
* @head: the list to test.
*/
static __inline__ int aac_list_empty(struct aac_list_head *head)
{
return head->next == ((u32)(ulong)head);
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline__ void aac_list_add(struct aac_list_head * n,
struct aac_list_head * prev,
struct aac_list_head * next)
{
next->prev = (u32)(ulong)n;
n->next = (u32)(ulong)next;
n->prev = (u32)(ulong)prev;
prev->next = (u32)(ulong)n;
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static __inline__ void aac_list_add_tail(struct aac_list_head *n, struct aac_list_head *head)
{
aac_list_add(n, (struct aac_list_head*)(ulong)(head->prev), head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline__ void __aac_list_del(struct aac_list_head * p,
struct aac_list_head * n)
{
n->prev = (u32)(ulong)p;
p->next = (u32)(ulong)n;
}
/**
* aac_list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is in an undefined state.
*/
static __inline__ void aac_list_del(struct aac_list_head *entry)
{
__aac_list_del((struct aac_list_head*)(ulong)entry->prev,(struct aac_list_head*)(ulong) entry->next);
entry->next = entry->prev = 0;
}
/**
* aac_list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*/
#define aac_list_entry(ptr, type, member) \
((type *)((char *)(ptr)-(ulong)(&((type *)0)->member)))
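As an aside, the open-coded pointer arithmetic in aac_list_entry() is the
usual container_of()/list_entry() idiom, only operating on 32-bit link
values; a stand-alone sketch of the offset trick (toy structure names,
illustration only, not driver code):

	#include <stdio.h>

	typedef unsigned int u32;
	typedef unsigned long ulong;

	struct aac_list_head { u32 next; u32 prev; };

	#define aac_list_entry(ptr, type, member) \
		((type *)((char *)(ptr)-(ulong)(&((type *)0)->member)))

	struct demo_fib {
		int payload;
		struct aac_list_head links;	/* embedded list node */
	};

	int main(void)
	{
		struct demo_fib f = { .payload = 42 };
		struct aac_list_head *node = &f.links;

		/* Recover the containing structure from the embedded
		 * node, exactly as list_entry()/container_of() would. */
		struct demo_fib *back = aac_list_entry(node, struct demo_fib, links);

		printf("%d\n", back->payload);	/* prints 42 */
		return 0;
	}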
/*
* Assign type values to the FSA communication data structures
*/
......@@ -339,11 +255,11 @@ static __inline__ void aac_list_del(struct aac_list_head *entry)
#define FsaNormal 1
#define FsaHigh 2
/*
* Define the FIB. The FIB is the where all the requested data and
* command information are put to the application on the FSA adapter.
*/
struct aac_fibhdr {
u32 XferState; // Current transfer state for this CCB
u16 Command; // Routing information for the destination
......@@ -359,13 +275,9 @@ struct aac_fibhdr {
u32 _ReceiverTimeStart; // Timestamp for receipt of fib
u32 _ReceiverTimeDone; // Timestamp for completion of fib
} _s;
struct aac_list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
// struct list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
} _u;
};
#define FibLinks _u._FibLinks
#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
......@@ -558,12 +470,11 @@ struct aac_queue {
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
u32 padding; /* Padding - FIXME - can remove I believe */
struct aac_list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
// struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
u32 numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
u32 numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
};
/*
......@@ -744,7 +655,7 @@ struct aac_fib_context {
struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct aac_list_head hw_fib_list; // this holds hw_fibs which should be 32 bit addresses
struct list_head fib_list; // this holds fibs and their attachd hw_fibs
};
struct fsa_scsi_hba {
......@@ -781,7 +692,11 @@ struct fib {
* Outstanding I/O queue.
*/
struct list_head queue;
/*
* And for the internal issue/reply queues (we may be able
* to merge these two)
*/
struct list_head fiblink;
void *data;
struct hw_fib *hw_fib; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
......@@ -836,19 +751,19 @@ struct aac_adapter_info
/*
* Supported Options
*/
#define AAC_OPT_SNAPSHOT cpu_to_le32(1)
#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1)
#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2)
#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3)
#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
#define AAC_OPT_ALARM cpu_to_le32(1<<11)
#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
#define AAC_OPT_SNAPSHOT cpu_to_le32(1)
#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1)
#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2)
#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3)
#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
#define AAC_OPT_ALARM cpu_to_le32(1<<11)
#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
struct aac_dev
{
......@@ -862,11 +777,10 @@ struct aac_dev
*/
dma_addr_t hw_fib_pa;
struct hw_fib *hw_fib_va;
ulong fib_base_va;
struct hw_fib *aif_base_va;
/*
* Fib Headers
*/
// dmb struct fib fibs[AAC_NUM_FIB]; /* Doing it here takes up too much from the scsi pool*/
struct fib *fibs;
struct fib *free_fib;
......@@ -887,7 +801,6 @@ struct aac_dev
unsigned long fsrev; /* Main driver's revision number */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
// void * init_pa; /* Holds physical address of the init struct */
dma_addr_t init_pa; /* Holds physical address of the init struct */
struct pci_dev *pdev; /* Our PCI interface */
......@@ -898,7 +811,7 @@ struct aac_dev
struct Scsi_Host *scsi_host_ptr;
struct fsa_scsi_hba fsa_dev;
int thread_pid;
pid_t thread_pid;
int cardtype;
/*
......
......@@ -148,7 +148,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void *arg)
* the list to 0.
*/
fibctx->count = 0;
AAC_INIT_LIST_HEAD(&fibctx->hw_fib_list);
INIT_LIST_HEAD(&fibctx->fib_list);
fibctx->jiffies = jiffies/HZ;
/*
* Now add this context onto the adapter's
......@@ -179,7 +179,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
{
struct fib_ioctl f;
struct aac_fib_context *fibctx, *aifcp;
struct hw_fib * hw_fib;
struct fib *fib;
int status;
struct list_head * entry;
int found;
......@@ -222,25 +222,27 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
* -EAGAIN
*/
return_fib:
if (!aac_list_empty(&fibctx->hw_fib_list)) {
struct aac_list_head * entry;
if (!list_empty(&fibctx->fib_list)) {
struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = (struct aac_list_head*)(ulong)fibctx->hw_fib_list.next;
aac_list_del(entry);
entry = fibctx->fib_list.next;
list_del(entry);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(f.fib, hw_fib, sizeof(struct hw_fib))) {
kfree(hw_fib);
if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
kfree(fib->hw_fib);
kfree(fib);
return -EFAULT;
}
/*
* Free the space occupied by this copy of the fib.
*/
kfree(hw_fib);
kfree(fib->hw_fib);
kfree(fib);
status = 0;
fibctx->jiffies = jiffies/HZ;
} else {
......@@ -262,24 +264,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
struct hw_fib *hw_fib;
struct fib *fib;
/*
* First free any FIBs that have not been consumed.
*/
while (!aac_list_empty(&fibctx->hw_fib_list)) {
struct aac_list_head * entry;
while (!list_empty(&fibctx->fib_list)) {
struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = (struct aac_list_head*)(ulong)(fibctx->hw_fib_list.next);
aac_list_del(entry);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
entry = fibctx->fib_list.next;
list_del(entry);
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
/*
* Free the space occupied by this copy of the fib.
*/
kfree(hw_fib);
kfree(fib->hw_fib);
kfree(fib);
}
/*
* Remove the Context from the AdapterFibContext List
......
......@@ -81,9 +81,9 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* Adapter Fibs are the first thing allocated so that they
* start page aligned
*/
dev->fib_base_va = (ulong)base;
dev->aif_base_va = (struct hw_fib *)base;
init->AdapterFibsVirtualAddress = cpu_to_le32((u32)(ulong)phys);
init->AdapterFibsVirtualAddress = cpu_to_le32(0);
init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
init->AdapterFibsSize = cpu_to_le32(fibsize);
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
......@@ -94,6 +94,9 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* mapping system, but older Firmware did, and had *troubles* dealing
* with the math overloading past 32 bits, thus we must limit this
* field.
*
* FIXME: this assumes the memory is mapped zero->n, which isnt
* always true on real computers.
*/
if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
init->HostPhysMemPages =
......@@ -140,7 +143,7 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->dev = dev;
INIT_LIST_HEAD(&q->pendingq);
init_waitqueue_head(&q->cmdready);
AAC_INIT_LIST_HEAD(&q->cmdq);
INIT_LIST_HEAD(&q->cmdq);
init_waitqueue_head(&q->qfull);
spin_lock_init(&q->lockdata);
q->lock = &q->lockdata;
......
......@@ -133,13 +133,10 @@ struct fib * fib_alloc(struct aac_dev *dev)
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
while(!fibptr){
spin_unlock_irqrestore(&dev->fib_lock, flags);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
}
/* Cannot sleep here or you get hangs. Instead we did the
maths at compile time. */
if(!fibptr)
BUG();
dev->free_fib = fibptr->next;
spin_unlock_irqrestore(&dev->fib_lock, flags);
/*
......@@ -290,7 +287,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
}
}
/*Command thread: *
/**
* aac_queue_get - get the next free QE
* @dev: Adapter
* @index: Returned index
......@@ -450,8 +447,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* Map the fib into 32bits by using the fib number
*/
// hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1;
hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa);
hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
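The low bit of this 32-bit value is left free for the adapter's "fast
response" flag, which the reworked aac_response_normal() below splits out
again; a stand-alone sketch of the round trip (plain user-space C,
illustration only):

	#include <assert.h>

	typedef unsigned int u32;

	int main(void)
	{
		u32 fib_index = 7;

		/* Encode as above: fib number shifted up by one bit. */
		u32 sender = fib_index << 1;

		/* Suppose the adapter sets bit 0 to mark a fast completion;
		 * the response handler recovers both pieces. */
		u32 addr = sender | 0x01;
		int fast = addr & 0x01;
		u32 index = addr >> 1;

		assert(fast == 1);
		assert(index == fib_index);
		return 0;
	}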
/*
* Set FIB state to indicate where it came from and if we want a
......@@ -492,7 +488,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
dprintk((KERN_DEBUG " hw_fib pa being sent=%xl\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
......@@ -806,8 +802,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
int aac_command_thread(struct aac_dev * dev)
{
struct hw_fib *hw_fib, *newfib;
struct fib fibptr; /* for error logging */
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
......@@ -828,42 +824,44 @@ int aac_command_thread(struct aac_dev * dev)
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
memset(&fibptr, 0, sizeof(struct fib));
add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while(1)
{
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
while(!aac_list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct aac_list_head *entry;
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
entry = (struct aac_list_head*)(ulong)(queues->queue[HostNormCmdQueue].cmdq.next);
dprintk(("aacraid: Command thread: removing fib from cmdq (%p)\n",entry));
aac_list_del(entry);
entry = queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
hw_fib = aac_list_entry(entry, struct hw_fib, header.FibLinks);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a
* worker thread that is TBD. We Really can't
* do anything at this point since we don't have
* anything defined for this thread to do.
*/
memset(&fibptr, 0, sizeof(struct fib));
fibptr.type = FSAFS_NTC_FIB_CONTEXT;
fibptr.size = sizeof( struct fib );
fibptr.hw_fib = hw_fib;
fibptr.data = hw_fib->data;
fibptr.dev = dev;
hw_fib = fib->hw_fib;
memset(fib, 0, sizeof(struct fib));
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof( struct fib );
fib->hw_fib = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
/*
* We only handle AifRequest fibs from the adapter.
*/
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
aac_handle_aif(dev, &fibptr);
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
aac_handle_aif(dev, fib);
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, sizeof(u32));
} else {
struct list_head *entry;
/* The u32 here is important and intended. We are using
......@@ -872,6 +870,10 @@ int aac_command_thread(struct aac_dev * dev)
u32 time_now, time_last;
unsigned long flagv;
/* Sniff events */
if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
aac_handle_aif(dev, fib);
time_now = jiffies/HZ;
spin_lock_irqsave(&dev->fib_lock, flagv);
......@@ -893,6 +895,11 @@ int aac_command_thread(struct aac_dev * dev)
*/
if (fibctx->count > 20)
{
/*
* It's *not* jiffies folks,
* but jiffies / HZ so do not
* panic ...
*/
time_last = fibctx->jiffies;
/*
* Has it been > 2 minutes
......@@ -909,17 +916,20 @@ int aac_command_thread(struct aac_dev * dev)
* Warning: no sleep allowed while
* holding spinlock
*/
newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
if (newfib) {
hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
if (newfib && hw_newfib) {
/*
* Make the copy of the FIB
*/
memcpy(newfib, hw_fib, sizeof(struct hw_fib));
memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
memcpy(newfib, fib, sizeof(struct fib));
newfib->hw_fib = hw_newfib;
/*
* Put the FIB onto the
* fibctx's fibs
*/
aac_list_add_tail(&newfib->header.FibLinks, &fibctx->hw_fib_list);
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
fibctx->count++;
/*
* Set the event to wake up the
......@@ -928,6 +938,10 @@ int aac_command_thread(struct aac_dev * dev)
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
if(newfib)
kfree(newfib);
if(hw_newfib)
kfree(hw_newfib);
}
entry = entry->next;
}
......@@ -935,10 +949,11 @@ int aac_command_thread(struct aac_dev * dev)
* Set the status of this FIB
*/
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibptr, sizeof(u32));
fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib);
}
/*
* There are no more AIF's
......
......@@ -70,12 +70,12 @@ unsigned int aac_response_normal(struct aac_queue * q)
*/
while(aac_consumer_get(dev, q, &entry))
{
u32 fast ;
fast = (entry->addr & cpu_to_le32(0x01));
hwfib = (struct hw_fib *)((char *)dev->hw_fib_va +
((entry->addr & ~0x01) - dev->hw_fib_pa));
fib = &dev->fibs[hwfib->header.SenderData];
int fast;
u32 index = le32_to_cpu(entry->addr);
fast = index & 0x01;
fib = &dev->fibs[index >> 1];
hwfib = fib->hw_fib;
aac_consumer_free(dev, q, HostNormRespQueue);
/*
* Remove this fib from the Outstanding I/O queue.
......@@ -169,29 +169,44 @@ unsigned int aac_command_normal(struct aac_queue *q)
*/
while(aac_consumer_get(dev, q, &entry))
{
struct fib fibctx;
struct hw_fib * hw_fib;
hw_fib = (struct hw_fib *)((char *)dev->hw_fib_va +
((entry->addr & ~0x01) - dev->hw_fib_pa));
if (dev->aif_thread) {
aac_list_add_tail(&hw_fib->header.FibLinks, &q->cmdq);
u32 index;
struct fib *fib = &fibctx;
index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
hw_fib = &dev->aif_base_va[index];
/*
* Allocate a FIB at all costs. For non queued stuff
* we can just use the stack so we are happy. We need
* a fib object in order to manage the linked lists
*/
if (dev->aif_thread)
if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
fib = &fibctx;
memset(fib, 0, sizeof(struct fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
if (dev->aif_thread && fib != &fibctx) {
list_add_tail(&fib->fiblink, &q->cmdq);
aac_consumer_free(dev, q, HostNormCmdQueue);
wake_up_interruptible(&q->cmdready);
} else {
struct fib fibctx;
aac_consumer_free(dev, q, HostNormCmdQueue);
spin_unlock_irqrestore(q->lock, flags);
memset(&fibctx, 0, sizeof(struct fib));
fibctx.type = FSAFS_NTC_FIB_CONTEXT;
fibctx.size = sizeof(struct fib);
fibctx.hw_fib = hw_fib;
fibctx.data = hw_fib->data;
fibctx.dev = dev;
/*
* Set the status of this FIB
*/
*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(&fibctx, sizeof(u32));
fib_adapter_complete(fib, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
}
......
......@@ -419,6 +419,11 @@ int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
if (dev->thread_pid < 0) {
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
return -1;
}
/*
* Tell the adapter that all is configure, and it can start
* accepting requests
......
......@@ -4,6 +4,7 @@
* Copyright (C) 1997 Wu Ching Chen
* 2.1.x update (C) 1998 Krzysztof G. Baranowski
* 2.5.x update (C) 2002 Red Hat <alan@redhat.com>
* 2.6.x update (C) 2004 Red Hat <alan@redhat.com>
*
* Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
*
......@@ -126,9 +127,11 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id,
/*
* Issue more commands
*/
spin_lock_irqsave(dev->host->host_lock, flags);
if (((dev->quhdu != dev->quendu) || (dev->last_cmd != 0xff)) && (dev->in_snd == 0)) {
send_s870(host);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
/*
* Done
*/
......@@ -371,9 +374,11 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id,
/*
* If there is stuff to send and nothing going then send it
*/
spin_lock_irqsave(dev->host->host_lock, flags);
if (((dev->last_cmd != 0xff) || (dev->quhdu != dev->quendu)) && (dev->in_snd == 0)) {
send_s870(host);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
dev->in_int = 0;
goto out;
}
......@@ -443,9 +448,16 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id,
return IRQ_HANDLED;
}
/**
* atp870u_queuecommand - Queue SCSI command
* @req_p: request block
* @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
static int atp870u_queuecommand(Scsi_Cmnd * req_p, void (*done) (Scsi_Cmnd *))
{
unsigned long flags;
unsigned short int m;
unsigned int tmport;
struct Scsi_Host *host;
......@@ -484,7 +496,6 @@ static int atp870u_queuecommand(Scsi_Cmnd * req_p, void (*done) (Scsi_Cmnd *))
* Count new command
*/
spin_lock_irqsave(host->host_lock, flags);
dev->quendu++;
if (dev->quendu >= qcnt) {
dev->quendu = 0;
......@@ -498,24 +509,31 @@ static int atp870u_queuecommand(Scsi_Cmnd * req_p, void (*done) (Scsi_Cmnd *))
}
dev->quendu--;
req_p->result = 0x00020000;
spin_unlock_irqrestore(host->host_lock, flags);
done(req_p);
return 0;
}
dev->querequ[dev->quendu] = req_p;
tmport = dev->ioport + 0x1c;
spin_unlock_irqrestore(host->host_lock, flags);
if ((inb(tmport) == 0) && (dev->in_int == 0) && (dev->in_snd == 0)) {
send_s870(host);
}
return 0;
}
/**
* send_s870 - send a command to the controller
* @host: host
*
* On entry there is work queued to be done. We move some of that work to the
* controller itself.
*
* Caller holds the host lock.
*/
static void send_s870(struct Scsi_Host *host)
{
unsigned int tmport;
Scsi_Cmnd *workrequ;
unsigned long flags;
unsigned int i;
unsigned char j, target_id;
unsigned char *prd;
......@@ -527,10 +545,7 @@ static void send_s870(struct Scsi_Host *host)
struct atp_unit *dev = (struct atp_unit *)&host->hostdata;
int sg_count;
spin_lock_irqsave(host->host_lock, flags);
if (dev->in_snd != 0) {
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
dev->in_snd = 1;
......@@ -543,13 +558,11 @@ static void send_s870(struct Scsi_Host *host)
dev->last_cmd = 0xff;
if (dev->quhdu == dev->quendu) {
dev->in_snd = 0;
spin_unlock_irqrestore(dev->host->host_lock, flags);
return;
}
}
if ((dev->last_cmd != 0xff) && (dev->working != 0)) {
dev->in_snd = 0;
spin_unlock_irqrestore(dev->host->host_lock, flags);
return;
}
dev->working++;
......@@ -567,7 +580,6 @@ static void send_s870(struct Scsi_Host *host)
dev->quhdu = j;
dev->working--;
dev->in_snd = 0;
spin_unlock_irqrestore(host->host_lock, flags);
return;
cmd_subp:
workportu = dev->ioport;
......@@ -582,7 +594,6 @@ static void send_s870(struct Scsi_Host *host)
abortsnd:
dev->last_cmd |= 0x40;
dev->in_snd = 0;
spin_unlock_irqrestore(dev->host->host_lock, flags);
return;
oktosend:
memcpy(&dev->ata_cdbu[0], &workrequ->cmnd[0], workrequ->cmd_len);
......@@ -684,7 +695,6 @@ static void send_s870(struct Scsi_Host *host)
dev->last_cmd |= 0x40;
}
dev->in_snd = 0;
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
tmpcip = dev->pciport;
......@@ -770,7 +780,6 @@ static void send_s870(struct Scsi_Host *host)
dev->last_cmd |= 0x40;
}
dev->in_snd = 0;
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
if (inb(tmport) == 0) {
......@@ -781,9 +790,6 @@ static void send_s870(struct Scsi_Host *host)
dev->last_cmd |= 0x40;
}
dev->in_snd = 0;
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val)
......
......@@ -19,7 +19,7 @@
#endif
#define DC390_BANNER "Tekram DC390/AM53C974"
#define DC390_VERSION "2.0f 2000-12-20"
#define DC390_VERSION "2.1b 2004-04-13"
/* We don't have eh_abort_handler, eh_device_reset_handler,
* eh_bus_reset_handler, eh_host_reset_handler yet!
......@@ -33,11 +33,11 @@
# define USE_NEW_EH
#endif
extern int DC390_detect(Scsi_Host_Template *psht);
extern int DC390_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
extern int DC390_abort(Scsi_Cmnd *cmd);
extern int DC390_reset(Scsi_Cmnd *cmd);
extern int DC390_bios_param(struct scsi_device *sdev, struct block_device *dev,
static int DC390_detect(Scsi_Host_Template *psht);
static int DC390_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
static int DC390_abort(Scsi_Cmnd *cmd);
static int DC390_reset(Scsi_Cmnd *cmd);
static int DC390_bios_param(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int geom[]);
static int DC390_release(struct Scsi_Host *);
......
......@@ -65,7 +65,7 @@ static int adpt_device_reset(Scsi_Cmnd* cmd);
#include "dpt/dpti_i2o.h"
#include "dpt/dpti_ioctl.h"
#define DPT_I2O_VERSION "2.4 Build 5"
#define DPT_I2O_VERSION "2.4 Build 5go"
#define DPT_VERSION 2
#define DPT_REVISION '4'
#define DPT_SUBREVISION '5'
......@@ -272,7 +272,7 @@ struct sg_simple_element {
static void adpt_i2o_sys_shutdown(void);
static int adpt_init(void);
static int adpt_i2o_build_sys_table(void);
static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
#ifdef REBOOT_NOTIFIER
static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
#endif
......
......@@ -2,7 +2,7 @@
#define _GDTH_IOCTL_H
/* gdth_ioctl.h
* $Id: gdth_ioctl.h,v 1.11 2003/02/27 14:59:03 achim Exp $
* $Id: gdth_ioctl.h,v 1.14 2004/02/19 15:43:15 achim Exp $
*/
/* IOCTLs */
......@@ -21,8 +21,8 @@
#define GDTIOCTL_RESCAN (GDTIOCTL_MASK |11) /* rescan host drives */
#define GDTIOCTL_RESET_DRV (GDTIOCTL_MASK |12) /* reset (remote) drv. res. */
#define GDTIOCTL_MAGIC 0xaffe0004
#define EVENT_SIZE 294
#define GDTIOCTL_MAGIC 0xaffe0004
#define EVENT_SIZE 294
#define GDTH_MAXSG 32 /* max. s/g elements */
#define MAX_LDRIVES 255 /* max. log. drive count */
......@@ -35,7 +35,9 @@
/* typedefs */
#ifdef __KERNEL__
typedef u32 ulong32;
typedef u64 ulong64;
#endif
#define PACKED __attribute__((packed))
/* scatter/gather element */
......@@ -44,6 +46,12 @@ typedef struct {
ulong32 sg_len; /* length */
} PACKED gdth_sg_str;
/* scatter/gather element - 64bit addresses */
typedef struct {
ulong64 sg_ptr; /* address */
ulong32 sg_len; /* length */
} PACKED gdth_sg64_str;
/* command structure */
typedef struct {
ulong32 BoardNode; /* board node (always 0) */
......@@ -58,18 +66,26 @@ typedef struct {
ulong32 sg_canz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED cache; /* cache service cmd. str. */
struct {
ushort DeviceNo; /* number of cache drive */
ulong64 BlockNo; /* block number */
ulong32 BlockCnt; /* block count */
ulong64 DestAddr; /* dest. addr. (if s/g: -1) */
ulong32 sg_canz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED cache64; /* cache service cmd. str. */
struct {
ushort param_size; /* size of p_param buffer */
ulong32 subfunc; /* IOCTL function */
ulong32 channel; /* device */
ulong32 p_param; /* buffer */
ulong64 p_param; /* buffer */
} PACKED ioctl; /* IOCTL command structure */
struct {
ushort reserved;
union {
struct {
ulong32 msg_handle; /* message handle */
ulong32 msg_addr; /* message buffer address */
ulong64 msg_addr; /* message buffer address */
} PACKED msg;
unchar data[12]; /* buffer for rtc data, ... */
} su;
......@@ -93,6 +109,24 @@ typedef struct {
ulong32 sg_ranz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED raw; /* raw service cmd. struct. */
struct {
ushort reserved;
ulong32 direction; /* data direction */
ulong32 mdisc_time; /* disc. time (0: no timeout)*/
ulong32 mcon_time; /* connect time(0: no to.) */
ulong64 sdata; /* dest. addr. (if s/g: -1) */
ulong32 sdlen; /* data length (bytes) */
ulong32 clen; /* SCSI cmd. length(6,..,16) */
unchar cmd[16]; /* SCSI command */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar bus; /* SCSI bus number */
unchar priority; /* only 0 used */
ulong32 sense_len; /* sense data length */
ulong64 sense_data; /* sense data addr. */
ulong32 sg_ranz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED raw64; /* raw service cmd. struct. */
} u;
/* additional variables */
unchar Service; /* controller service */
......@@ -236,7 +270,6 @@ typedef struct {
} gdth_iord_str;
#endif
#ifdef GDTH_IOCTL_CHRDEV
/* GDTIOCTL_GENERAL */
typedef struct {
ushort ionode; /* controller number */
......@@ -244,8 +277,8 @@ typedef struct {
ulong32 info; /* error info */
ushort status; /* status */
ulong data_len; /* data buffer size */
ulong sense_len; /* sense buffer size */
gdth_cmd_str command; /* command */
ulong sense_len; /* sense buffer size */
gdth_cmd_str command; /* command */
} gdth_ioctl_general;
/* GDTIOCTL_LOCKDRV */
......@@ -310,6 +343,5 @@ typedef struct {
ushort number; /* bus/host drive number */
ushort status; /* status */
} gdth_ioctl_reset;
#endif
#endif
......@@ -2,42 +2,31 @@
#define _GDTH_PROC_H
/* gdth_proc.h
* $Id: gdth_proc.h,v 1.14 2003/08/27 11:37:35 achim Exp $
* $Id: gdth_proc.h,v 1.16 2004/01/14 13:09:01 achim Exp $
*/
static int gdth_set_info(char *buffer,int length,int hanum,int busnum);
static int gdth_get_info(char *buffer,char **start,off_t offset,
int length,int hanum,int busnum);
static int gdth_set_info(char *buffer,int length,struct Scsi_Host *host,
int hanum,int busnum);
static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
struct Scsi_Host *host,int hanum,int busnum);
#if LINUX_VERSION_CODE >= 0x020503
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static void gdth_do_req(Scsi_Request *srp, gdth_cmd_str *cmd,
char *cmnd, int timeout);
static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Request *scp);
#ifdef GDTH_IOCTL_PROC
static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Request *scp);
#endif
#elif LINUX_VERSION_CODE >= 0x020322
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static void gdth_do_cmd(Scsi_Cmnd *scp, gdth_cmd_str *cmd,
char *cmnd, int timeout);
static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd *scp);
#ifdef GDTH_IOCTL_PROC
static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd *scp);
#endif
#else
static void gdth_do_cmd(Scsi_Cmnd *scp, gdth_cmd_str *cmd,
char *cmnd, int timeout);
static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
#ifdef GDTH_IOCTL_PROC
static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
#endif
#endif
static char *gdth_ioctl_alloc(int hanum, int size, int scratch,
ulong32 *paddr);
static void gdth_ioctl_free(int hanum, int size, char *buf, ulong32 paddr);
#ifdef GDTH_IOCTL_PROC
static int gdth_ioctl_check_bin(int hanum, ushort size);
#endif
ulong64 *paddr);
static void gdth_ioctl_free(int hanum, int size, char *buf, ulong64 paddr);
static void gdth_wait_completion(int hanum, int busnum, int id);
static void gdth_stop_timeout(int hanum, int busnum, int id);
static void gdth_start_timeout(int hanum, int busnum, int id);
......
......@@ -36,8 +36,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.0.6"
#define IPR_DRIVER_DATE "(May 3, 2004)"
#define IPR_DRIVER_VERSION "2.0.7"
#define IPR_DRIVER_DATE "(May 21, 2004)"
/*
* IPR_DBG_TRACE: Setting this to 1 will turn on some general function tracing
......@@ -413,8 +413,8 @@ struct ipr_ioasa_af_dasd {
}__attribute__((packed, aligned (4)));
struct ipr_ioasa_gpdd {
u8 device_end_state;
u8 device_bus_phase;
u8 end_state;
u8 bus_phase;
u16 reserved;
u32 ioa_data[23];
}__attribute__((packed, aligned (4)));
......@@ -457,7 +457,7 @@ struct ipr_ioasa {
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
struct ipr_ioasa_raw raw;
};
} u;
}__attribute__((packed, aligned (4)));
struct ipr_mode_parm_hdr {
......@@ -617,14 +617,14 @@ struct ipr_hostrcb_error {
struct ipr_hostrcb_type_02_error type_02_error;
struct ipr_hostrcb_type_03_error type_03_error;
struct ipr_hostrcb_type_04_error type_04_error;
};
} u;
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_raw {
u32 data[sizeof(struct ipr_hostrcb_error)/sizeof(u32)];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb {
struct ipr_hcam {
u8 op_code;
#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE 0xE1
#define IPR_HOST_RCB_OP_CODE_LOG_DATA 0xE2
......@@ -662,14 +662,14 @@ struct ipr_hostrcb {
struct ipr_hostrcb_error error;
struct ipr_hostrcb_cfg_ch_not ccn;
struct ipr_hostrcb_raw raw;
};
} u;
}__attribute__((packed, aligned (4)));
/* Driver added data */
struct ipr_hostrcb {
struct ipr_hcam hcam;
u32 hostrcb_dma;
struct list_head queue;
}__attribute__((packed, aligned (4)));
#define IPR_HOSTRCB_SZ offsetof(struct ipr_hostrcb, hostrcb_dma)
};
/* IPR smart dump table structures */
struct ipr_sdt_entry {
......@@ -785,7 +785,7 @@ struct ipr_trace_entry {
u32 ioasc;
u32 add_data;
u32 res_addr;
};
} u;
};
struct ipr_sglist {
......@@ -939,7 +939,8 @@ struct ipr_cmnd {
unsigned long scratch;
struct ipr_resource_entry *res;
struct ipr_cmnd *sibling;
};
struct scsi_device *sdev;
} u;
struct ipr_ioa_cfg *ioa_cfg;
};
......
......@@ -38,7 +38,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <asm/dma.h>
#include <asm/system.h>
......
......@@ -538,7 +538,7 @@ int qlogicfas408_device_reset(Scsi_Cmnd * cmd)
* Return info string
*/
char *qlogicfas408_info(struct Scsi_Host *host)
const char *qlogicfas408_info(struct Scsi_Host *host)
{
struct qlogicfas408_priv *priv = get_priv_by_host(host);
return priv->qinfo;
......
......@@ -111,7 +111,7 @@ int qlogicfas408_abort(Scsi_Cmnd * cmd);
int qlogicfas408_bus_reset(Scsi_Cmnd * cmd);
int qlogicfas408_host_reset(Scsi_Cmnd * cmd);
int qlogicfas408_device_reset(Scsi_Cmnd * cmd);
char *qlogicfas408_info(struct Scsi_Host *host);
const char *qlogicfas408_info(struct Scsi_Host *host);
int qlogicfas408_get_chip_type(int qbase, int int_type);
void qlogicfas408_setup(int qbase, int id, int int_type);
int qlogicfas408_detect(int qbase, int int_type);
......
......@@ -1610,7 +1610,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_CANCEL:
switch (oldstate) {
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_OFFLINE:
break;
default:
goto illegal;
......@@ -1619,9 +1621,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_DEL:
switch (oldstate) {
case SDEV_CREATED:
case SDEV_CANCEL:
case SDEV_OFFLINE:
break;
default:
goto illegal;
......
......@@ -46,7 +46,7 @@ extern unsigned int scsi_logging_level;
#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) \
{ \
if ((SCSI_LOG_LEVEL(SHIFT, BITS)) > (LEVEL)) \
if (unlikely((SCSI_LOG_LEVEL(SHIFT, BITS)) > (LEVEL))) \
(CMD); \
}
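As an aside, the only functional change here is the unlikely() annotation;
in the 2.6 tree that macro boils down to GCC's branch-prediction builtin,
roughly

	#define unlikely(x)	__builtin_expect(!!(x), 0)

so the (normally false) logging test is kept off the hot path.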
#else
......
......@@ -80,7 +80,6 @@ module_param_named(max_luns, max_scsi_luns, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
"last scsi LUN (should be between 1 and 2^32-1)");
#ifdef CONFIG_SCSI_REPORT_LUNS
/*
* max_scsi_report_luns: the maximum number of LUNS that will be
* returned from the REPORT LUNS command. 8 times this value must
......@@ -88,13 +87,19 @@ MODULE_PARM_DESC(max_luns,
* in practice, the maximum number of LUNs suppored by any device
* is about 16k.
*/
static unsigned int max_scsi_report_luns = 128;
static unsigned int max_scsi_report_luns = 511;
module_param_named(max_report_luns, max_scsi_report_luns, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_report_luns,
"REPORT LUNS maximum number of LUNS received (should be"
" between 1 and 16384)");
#endif
static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3;
module_param_named(inq_timeout, scsi_inq_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
"Timeout (in seconds) waiting for devices to answer INQUIRY."
" Default is 5. Some non-compliant devices need more.");
/**
* scsi_unlock_floptical - unlock device via a special MODE SENSE command
......@@ -212,6 +217,11 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
INIT_LIST_HEAD(&sdev->starved_entry);
spin_lock_init(&sdev->list_lock);
/* if the device needs this changing, it may do so in the
* slave_configure function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
/*
* Some low level driver could use device->type
*/
......@@ -346,7 +356,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
memset(inq_result, 0, 36);
scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result, 36,
SCSI_TIMEOUT + 4 * HZ, 3);
HZ/2 + HZ*scsi_inq_timeout, 3);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: 1st INQUIRY %s with"
" code 0x%x\n", sreq->sr_result ?
......@@ -400,7 +410,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
memset(inq_result, 0, possible_inq_resp_len);
scsi_wait_req(sreq, (void *) scsi_cmd,
(void *) inq_result,
possible_inq_resp_len, SCSI_TIMEOUT + 4 * HZ, 3);
possible_inq_resp_len, (1+scsi_inq_timeout)*(HZ/2), 3);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: 2nd INQUIRY"
" %s with code 0x%x\n", sreq->sr_result ?
"failed" : "successful", sreq->sr_result));
......@@ -628,10 +638,6 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
/* if the device needs this changing, it may do so in the detect
* function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
sdev->use_10_for_rw = 1;
if (*bflags & BLIST_MS_SKIP_PAGE_08)
......@@ -863,7 +869,6 @@ static void scsi_sequential_lun_scan(struct Scsi_Host *shost, uint channel,
return;
}
#ifdef CONFIG_SCSI_REPORT_LUNS
/**
* scsilun_to_int: convert a scsi_lun to an int
* @scsilun: struct scsi_lun to be converted.
......@@ -924,9 +929,14 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
u8 *data;
/*
* Only support SCSI-3 and up devices.
* Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
* Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
* support more than 8 LUNs.
*/
if (sdev->scsi_level < SCSI_3)
if ((bflags & BLIST_NOREPORTLUN) ||
sdev->scsi_level < SCSI_2 ||
(sdev->scsi_level < SCSI_3 &&
(!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) )
return 1;
if (bflags & BLIST_NOLUN)
return 0;
......@@ -1090,9 +1100,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
return 0;
}
#else
# define scsi_report_lun_scan(sdev, blags, rescan) (1)
#endif /* CONFIG_SCSI_REPORT_LUNS */
struct scsi_device *scsi_add_device(struct Scsi_Host *shost,
uint channel, uint id, uint lun)
......
......@@ -15,7 +15,7 @@ dc390_freetag (PDCB pDCB, PSRB pSRB)
};
UCHAR
static UCHAR
dc390_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB )
{
UCHAR cmd; UCHAR disc_allowed, try_sync_nego;
......@@ -227,7 +227,7 @@ DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
#if DMA_INT
UCHAR dstatus;
#endif
DC390_AFLAGS DC390_IFLAGS; //DC390_DFLAGS
DC390_IFLAGS;
pACB = (PACB)dev_id;
for (pACB2 = dc390_pACB_start; (pACB2 && pACB2 != pACB); pACB2 = pACB2->pNextACB);
......@@ -237,26 +237,21 @@ DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
//DC390_LOCK_DRV;
sstatus = DC390_read8 (Scsi_Status);
if( !(sstatus & INTERRUPT) )
{ /*DC390_UNLOCK_DRV;*/ return IRQ_NONE; };
return IRQ_NONE;
DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
#if DMA_INT
DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
dstatus = dc390_dma_intr (pACB);
DC390_UNLOCK_ACB;
DC390_UNLOCK_IO(pACB->pScsiHost);
DEBUG1(printk (KERN_DEBUG "dstatus=%02x,", dstatus));
if (! (dstatus & SCSI_INTERRUPT))
{
DEBUG0(printk (KERN_WARNING "DC390 Int w/o SCSI actions (only DMA?)\n"));
//DC390_UNLOCK_DRV;
return IRQ_NONE;
};
#else
......@@ -266,8 +261,6 @@ DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
#endif
DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
//DC390_UNLOCK_DRV_NI; /* Allow _other_ CPUs to process IRQ (useful for shared IRQs) */
istate = DC390_read8 (Intern_State);
istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */
......@@ -339,14 +332,11 @@ DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
}
unlock:
//DC390_LOCK_DRV_NI;
DC390_UNLOCK_ACB;
DC390_UNLOCK_IO(pACB->pScsiHost);
//DC390_UNLOCK_DRV; /* Restore initial flags */
return IRQ_HANDLED;
}
irqreturn_t do_DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t do_DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
{
irqreturn_t ret;
DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq));
......@@ -356,7 +346,7 @@ irqreturn_t do_DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
return ret;
}
void
static void
dc390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
{
UCHAR sstatus;
......@@ -410,7 +400,7 @@ dc390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
}
}
void
static void
dc390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
{
UCHAR sstatus, residual, bval;
......@@ -521,7 +511,7 @@ dc390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
{
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */
}
}
}
static void
......@@ -740,9 +730,9 @@ dc390_restore_ptr (PACB pACB, PSRB pSRB)
psgl = pSRB->pSegmentList;
//dc390_pci_sync(pSRB);
while (pSRB->TotalXferredLen + (ULONG) psgl->length < pSRB->Saved_Ptr)
while (pSRB->TotalXferredLen + (ULONG) sg_dma_len(psgl) < pSRB->Saved_Ptr)
{
pSRB->TotalXferredLen += (ULONG) psgl->length;
pSRB->TotalXferredLen += (ULONG) sg_dma_len(psgl);
pSRB->SGIndex++;
if( pSRB->SGIndex < pSRB->SGcount )
{
......@@ -762,7 +752,7 @@ dc390_restore_ptr (PACB pACB, PSRB pSRB)
} else if(pcmd->request_buffer) {
//dc390_pci_sync(pSRB);
pSRB->Segmentx.length = pcmd->request_bufflen - pSRB->Saved_Ptr;
sg_dma_len(&pSRB->Segmentx) = pcmd->request_bufflen - pSRB->Saved_Ptr;
pSRB->SGcount = 1;
pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
} else {
......@@ -873,7 +863,7 @@ dc390_MsgIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
}
void
static void
dc390_DataIO_Comm( PACB pACB, PSRB pSRB, UCHAR ioDir)
{
PSGL psgl;
......@@ -885,6 +875,8 @@ dc390_DataIO_Comm( PACB pACB, PSRB pSRB, UCHAR ioDir)
if (pDCB) printk (KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n",
pDCB->TargetID, pDCB->TargetLUN);
else printk (KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (DCB 0!)\n");
pSRB->pSRBDCB = pDCB;
dc390_EnableMsgOut_Abort (pACB, pSRB);
if (pDCB) pDCB->DCBFlag |= ABORT_DEV;
return;
......@@ -1150,7 +1142,6 @@ dc390_Disconnect( PACB pACB )
pSRB = psrb;
}
pDCB->pGoingSRB = 0;
dc390_Query_to_Waiting (pACB);
dc390_Waiting_process (pACB);
}
else
......@@ -1466,7 +1457,7 @@ dc390_SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
ptr2 = pSRB->pSegmentList;
for( i=pSRB->SGIndex; i < bval; i++)
{
swlval += ptr2->length;
swlval += sg_dma_len(ptr2);
ptr2++;
}
REMOVABLEDEBUG(printk(KERN_INFO "XferredLen=%08x,NotXferLen=%08x\n",\
......@@ -1619,20 +1610,15 @@ dc390_SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
pACB->scan_devices = 0;
};
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,30)
pcmd->resid = pcmd->request_bufflen - pSRB->TotalXferredLen;
#endif
if (!DCB_removed) dc390_Going_remove (pDCB, pSRB);
/* Add to free list */
dc390_Free_insert (pACB, pSRB);
DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done pid %li\n", pcmd->pid));
DC390_UNLOCK_ACB_NI;
pcmd->scsi_done (pcmd);
DC390_LOCK_ACB_NI;
dc390_Query_to_Waiting (pACB);
dc390_Waiting_process (pACB);
return;
}
......@@ -1668,9 +1654,7 @@ dc390_DoingSRB_Done( PACB pACB, PSCSICMD cmd )
/* ReleaseSRB( pDCB, pSRB ); */
DEBUG0(printk (KERN_DEBUG "DC390: DoingSRB_Done: done pid %li\n", pcmd->pid));
DC390_UNLOCK_ACB_NI;
pcmd->scsi_done( pcmd );
DC390_LOCK_ACB_NI;
#endif
psrb = psrb2;
}
......@@ -1679,7 +1663,6 @@ dc390_DoingSRB_Done( PACB pACB, PSCSICMD cmd )
pdcb->TagMask = 0;
pdcb = pdcb->pNextDCB;
} while( pdcb != pDCB );
dc390_Query_to_Waiting (pACB);
}
......
......@@ -7,7 +7,7 @@
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2002 Douglas Gilbert
* Copyright (C) 1998 - 2004 Douglas Gilbert
*
* Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*
......@@ -17,27 +17,18 @@
* any later version.
*
*/
#include <linux/config.h>
static int sg_version_num = 30530; /* 2 digits for each component */
static int sg_version_num = 30531; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.31"
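Two decimal digits per component, as the comment above says: 30531 decodes
as 3.05.31, matching the "3.5.31" in SG_VERSION_STR.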
/*
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
* Then before running the program to be debugged enter:
* # echo "scsi log timeout 7" > /proc/scsi/scsi
* This will send copious output to the console and the log which
* is usually /var/log/messages. To turn off debugging enter:
* # echo "scsi log timeout 0" > /proc/scsi/scsi
* The 'timeout' token was chosen because it is relatively unused.
* The token 'hlcomplete' should be used but that triggers too
* much output from the sd device driver. To dump the current
* state of the SCSI mid level data structures enter:
* # echo "scsi dump 1" > /proc/scsi/scsi
* To dump the state of sg's data structures use:
* # cat /proc/scsi/sg/debug
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
......@@ -69,7 +60,7 @@ static int sg_version_num = 30530; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_str = "3.5.30 [20040124]";
static char *sg_version_date = "20040513";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
......@@ -110,7 +101,7 @@ static int sg_allow_dio = SG_ALLOW_DIO_DEF;
#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
#define SG_DEV_ARR_LUMP 6 /* amount to over allocate sg_dev_arr by */
#define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
static int sg_add(struct class_device *);
static void sg_remove(struct class_device *);
......@@ -1333,85 +1324,44 @@ static struct class_simple * sg_sysfs_class;
static int sg_sysfs_valid = 0;
static int
sg_add(struct class_device *cl_dev)
static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
struct gendisk *disk;
Sg_device *sdp = NULL;
Sg_device *sdp;
unsigned long iflags;
struct cdev * cdev = NULL;
void *old_sg_dev_arr = NULL;
int k, error;
disk = alloc_disk(1);
if (!disk)
sdp = vmalloc(sizeof(Sg_device));
if (!sdp)
return -ENOMEM;
cdev = cdev_alloc();
if (! cdev)
return -ENOMEM;
write_lock_irqsave(&sg_dev_arr_lock, iflags);
if (sg_nr_dev >= sg_dev_max) { /* try to resize */
if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */
Sg_device **tmp_da;
int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
tmp_da = (Sg_device **)vmalloc(
tmp_dev_max * sizeof(Sg_device *));
if (NULL == tmp_da) {
printk(KERN_ERR
"sg_add: device array cannot be resized\n");
error = -ENOMEM;
goto out;
}
tmp_da = vmalloc(tmp_dev_max * sizeof(Sg_device *));
if (unlikely(!tmp_da))
goto expand_failed;
write_lock_irqsave(&sg_dev_arr_lock, iflags);
memset(tmp_da, 0, tmp_dev_max * sizeof (Sg_device *));
memcpy(tmp_da, sg_dev_arr,
sg_dev_max * sizeof (Sg_device *));
vfree((char *) sg_dev_arr);
memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
old_sg_dev_arr = sg_dev_arr;
sg_dev_arr = tmp_da;
sg_dev_max = tmp_dev_max;
}
find_empty_slot:
for (k = 0; k < sg_dev_max; k++)
if (!sg_dev_arr[k])
break;
if (k >= SG_MAX_DEVS) {
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
printk(KERN_WARNING
"Unable to attach sg device <%d, %d, %d, %d>"
" type=%d, minor number exceeds %d\n",
scsidp->host->host_no, scsidp->channel, scsidp->id,
scsidp->lun, scsidp->type, SG_MAX_DEVS - 1);
if (NULL != sdp)
vfree((char *) sdp);
error = -ENODEV;
goto out;
}
if (k < sg_dev_max) {
if (NULL == sdp) {
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
sdp = (Sg_device *)vmalloc(sizeof(Sg_device));
write_lock_irqsave(&sg_dev_arr_lock, iflags);
if (!sg_dev_arr[k])
goto find_empty_slot;
}
} else
sdp = NULL;
if (NULL == sdp) {
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
printk(KERN_ERR "sg_add: Sg_device cannot be allocated\n");
error = -ENOMEM;
goto out;
}
if (unlikely(k >= SG_MAX_DEVS))
goto overflow;
SCSI_LOG_TIMEOUT(3, printk("sg_add: dev=%d \n", k));
memset(sdp, 0, sizeof(*sdp));
SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
disk->major = SCSI_GENERIC_MAJOR;
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
......@@ -1421,6 +1371,55 @@ sg_add(struct class_device *cl_dev)
sg_nr_dev++;
sg_dev_arr[k] = sdp;
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
error = k;
out:
if (error < 0)
vfree(sdp);
vfree(old_sg_dev_arr);
return error;
expand_failed:
printk(KERN_ERR "sg_alloc: device array cannot be resized\n");
error = -ENOMEM;
goto out;
overflow:
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
printk(KERN_WARNING
"Unable to attach sg device <%d, %d, %d, %d> type=%d, minor "
"number exceeds %d\n", scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
goto out;
}
static int
sg_add(struct class_device *cl_dev)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error, k;
disk = alloc_disk(1);
if (!disk)
return -ENOMEM;
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev)
goto out;
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
error = sg_alloc(disk, scsidp);
if (error < 0)
goto out;
k = error;
sdp = sg_dev_arr[k];
devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
......@@ -1543,6 +1542,7 @@ module_param_named(allow_dio, sg_allow_dio, int, 0);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
......@@ -2844,7 +2844,8 @@ sg_proc_write_dressz(struct file *filp, const char __user *buffer,
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
seq_printf(s, "%d\t%s\n", sg_version_num, sg_version_str);
seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
sg_version_date);
return 0;
}
......
......@@ -486,7 +486,7 @@ static int cross_eof(Scsi_Tape * STp, int forward)
tape_name(STp), forward ? "forward" : "backward"));
SRpnt = st_do_scsi(NULL, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_RETRIES, TRUE);
STp->device->timeout, MAX_RETRIES, TRUE);
if (!SRpnt)
return (STp->buffer)->syscall_result;
......@@ -544,7 +544,7 @@ static int flush_write_buffer(Scsi_Tape * STp)
cmd[4] = blks;
SRpnt = st_do_scsi(NULL, STp, cmd, transfer, SCSI_DATA_WRITE,
STp->timeout, MAX_WRITE_RETRIES, TRUE);
STp->device->timeout, MAX_WRITE_RETRIES, TRUE);
if (!SRpnt)
return (STp->buffer)->syscall_result;
......@@ -867,7 +867,7 @@ static int check_tape(Scsi_Tape *STp, struct file *filp)
memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
cmd[0] = READ_BLOCK_LIMITS;
SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, SCSI_DATA_READ, STp->timeout,
SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, SCSI_DATA_READ, STp->device->timeout,
MAX_READY_RETRIES, TRUE);
if (!SRpnt) {
retval = (STp->buffer)->syscall_result;
......@@ -894,7 +894,7 @@ static int check_tape(Scsi_Tape *STp, struct file *filp)
cmd[0] = MODE_SENSE;
cmd[4] = 12;
SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, SCSI_DATA_READ, STp->timeout,
SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, SCSI_DATA_READ, STp->device->timeout,
MAX_READY_RETRIES, TRUE);
if (!SRpnt) {
retval = (STp->buffer)->syscall_result;
......@@ -1116,7 +1116,7 @@ static int st_flush(struct file *filp)
cmd[4] = 1 + STp->two_fm;
SRpnt = st_do_scsi(NULL, STp, cmd, 0, SCSI_DATA_NONE,
STp->timeout, MAX_WRITE_RETRIES, TRUE);
STp->device->timeout, MAX_WRITE_RETRIES, TRUE);
if (!SRpnt) {
result = (STp->buffer)->syscall_result;
goto out;
......@@ -1509,7 +1509,7 @@ static ssize_t
cmd[4] = blks;
SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, SCSI_DATA_WRITE,
STp->timeout, MAX_WRITE_RETRIES, !async_write);
STp->device->timeout, MAX_WRITE_RETRIES, !async_write);
if (!SRpnt) {
retval = STbp->syscall_result;
goto out;
......@@ -1679,7 +1679,7 @@ static long read_tape(Scsi_Tape *STp, long count, Scsi_Request ** aSRpnt)
SRpnt = *aSRpnt;
SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, SCSI_DATA_READ,
STp->timeout, MAX_RETRIES, TRUE);
STp->device->timeout, MAX_RETRIES, TRUE);
release_buffering(STp);
*aSRpnt = SRpnt;
if (!SRpnt)
......@@ -2081,7 +2081,7 @@ static int st_set_options(Scsi_Tape *STp, long options)
DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
(value & ~MT_ST_SET_LONG_TIMEOUT)));
} else {
STp->timeout = value * HZ;
STp->device->timeout = value * HZ;
DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
name, value) );
}
......@@ -2189,7 +2189,7 @@ static int read_mode_page(Scsi_Tape *STp, int page, int omit_block_descs)
cmd[4] = 255;
SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_READ,
STp->timeout, 0, TRUE);
STp->device->timeout, 0, TRUE);
if (SRpnt == NULL)
return (STp->buffer)->syscall_result;
......@@ -2220,7 +2220,7 @@ static int write_mode_page(Scsi_Tape *STp, int page, int slow)
(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], SCSI_DATA_WRITE,
(slow ? STp->long_timeout : STp->timeout), 0, TRUE);
(slow ? STp->long_timeout : STp->device->timeout), 0, TRUE);
if (SRpnt == NULL)
return (STp->buffer)->syscall_result;
......@@ -2332,7 +2332,7 @@ static int do_load_unload(Scsi_Tape *STp, struct file *filp, int load_code)
}
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
timeout = STp->timeout;
timeout = STp->device->timeout;
}
else
timeout = STp->long_timeout;
......@@ -2512,7 +2512,7 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
cmd[4] = arg;
timeout = STp->timeout;
timeout = STp->device->timeout;
DEBC(
if (cmd_in == MTWEOF)
printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
......@@ -2530,7 +2530,7 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
cmd[0] = REZERO_UNIT;
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
timeout = STp->timeout;
timeout = STp->device->timeout;
}
DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
fileno = blkno = at_sm = 0;
......@@ -2543,7 +2543,7 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
cmd[0] = START_STOP;
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
timeout = STp->timeout;
timeout = STp->device->timeout;
}
cmd[4] = 3;
DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
......@@ -2576,7 +2576,7 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */
if (STp->immediate) {
cmd[1] |= 2; /* Don't wait for completion */
timeout = STp->timeout;
timeout = STp->device->timeout;
}
else
timeout = STp->long_timeout * 8;
......@@ -2628,7 +2628,7 @@ static int st_int_ioctl(Scsi_Tape *STp, unsigned int cmd_in, unsigned long arg)
(STp->buffer)->b_data[9] = (ltmp >> 16);
(STp->buffer)->b_data[10] = (ltmp >> 8);
(STp->buffer)->b_data[11] = ltmp;
timeout = STp->timeout;
timeout = STp->device->timeout;
DEBC(
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
printk(ST_DEB_MSG
......@@ -2809,7 +2809,7 @@ static int get_location(Scsi_Tape *STp, unsigned int *block, int *partition,
if (!logical && !STp->scsi2_logical)
scmd[1] = 1;
}
SRpnt = st_do_scsi(NULL, STp, scmd, 20, SCSI_DATA_READ, STp->timeout,
SRpnt = st_do_scsi(NULL, STp, scmd, 20, SCSI_DATA_READ, STp->device->timeout,
MAX_READY_RETRIES, TRUE);
if (!SRpnt)
return (STp->buffer)->syscall_result;
......@@ -2911,7 +2911,7 @@ static int set_location(Scsi_Tape *STp, unsigned int block, int partition,
}
if (STp->immediate) {
scmd[1] |= 1; /* Don't wait for completion */
timeout = STp->timeout;
timeout = STp->device->timeout;
}
SRpnt = st_do_scsi(NULL, STp, scmd, 0, SCSI_DATA_NONE,
......@@ -3408,11 +3408,17 @@ static int st_ioctl(struct inode *inode, struct file *file,
goto out;
}
up(&STp->lock);
i = scsi_cmd_ioctl(STp->disk, cmd_in, arg);
if (i != -ENOTTY)
return i;
else
return scsi_ioctl(STp->device, cmd_in, (void *) arg);
switch (cmd_in) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
break;
default:
i = scsi_cmd_ioctl(STp->disk, cmd_in, arg);
if (i != -ENOTTY)
return i;
break;
}
return scsi_ioctl(STp->device, cmd_in, (void *) arg);
out:
up(&STp->lock);
......@@ -3832,7 +3838,7 @@ static int st_probe(struct device *dev)
tpnt->partition = 0;
tpnt->new_partition = 0;
tpnt->nbr_partitions = 0;
tpnt->timeout = ST_TIMEOUT;
tpnt->device->timeout = ST_TIMEOUT;
tpnt->long_timeout = ST_LONG_TIMEOUT;
tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
......
......@@ -100,7 +100,6 @@ typedef struct {
unsigned char c_algo; /* compression algorithm */
unsigned char pos_unknown; /* after reset position unknown */
int tape_type;
int timeout; /* timeout for normal commands */
int long_timeout; /* timeout for commands known to take long time */
unsigned long max_pfn; /* the maximum page number reachable by the HBA */
......
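All of the st.c hunks above apply one change: the per-tape timeout field is dropped from Scsi_Tape (see the st.h hunk) and normal-command timeouts are taken from the underlying scsi_device instead, so the value is shared with the SCSI midlayer. A minimal sketch of the resulting idiom, assuming the Scsi_Tape layout shown above; st_normal_timeout()/st_set_normal_timeout() are hypothetical helper names, not part of the patch:

	/* Hypothetical helpers illustrating where the normal timeout now lives. */
	static inline int st_normal_timeout(Scsi_Tape *STp)
	{
		return STp->device->timeout;		/* was STp->timeout */
	}

	static inline void st_set_normal_timeout(Scsi_Tape *STp, int seconds)
	{
		STp->device->timeout = seconds * HZ;	/* as in st_set_options() above */
	}

Long timeouts are unaffected: STp->long_timeout stays in Scsi_Tape.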
......@@ -214,16 +214,12 @@ PSRB pFreeSRB;
PSRB pTmpSRB;
/* 0x2c: */
ULONG QueryCnt;
struct list_head cmdq;
/* 0x38: */
UCHAR msgin123[4];
UCHAR DCBmap[MAX_SCSI_ID];
UCHAR Connected;
UCHAR pad;
/* 0x3c: */
/* 0x30: */
#if defined(USE_SPINLOCKS) && USE_SPINLOCKS > 1 && (defined(CONFIG_SMP) || DEBUG_SPINLOCKS > 0)
spinlock_t lock;
#endif
......@@ -234,20 +230,20 @@ UCHAR MsgLen;
UCHAR Ignore_IRQ; /* Not used */
PDEVDECL1; /* Pointer to PCI cfg. space */
/* 0x4c/0x48: */
/* 0x40/0x3c: */
ULONG Cmds;
UINT SelLost;
UINT SelConn;
UINT CmdInQ;
UINT CmdOutOfSRB;
/* 0x60/0x5c: */
/* 0x54/0x50: */
struct timer_list Waiting_Timer;
/* 0x74/0x70: */
/* 0x68/0x64: */
DC390_SRB TmpSRB;
/* 0xd8/0xd4: */
/* 0xcc/0xc8: */
DC390_SRB SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
/* 0xfb0/0xfac: */
/* 0xfa4/0xfa0: */
};
typedef struct _ACB DC390_ACB, *PACB;
......@@ -406,16 +402,9 @@ typedef struct _ACB DC390_ACB, *PACB;
* SISC query queue
*/
typedef struct {
struct list_head list;
dma_addr_t saved_dma_handle;
} dc390_cmd_scp_t;
struct scsi_cmnd_list
{
char dummy[offsetof(struct scsi_cmnd, SCp)];
dc390_cmd_scp_t scp;
};
/*
** Inquiry Data format
*/
......
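With the private command queue (cmdq) and the scsi_cmnd_list overlay removed, the only per-command private data left in tmscsim is the saved DMA handle kept in the command's generic SCp scratch area. A hedged sketch of one way the remaining dc390_cmd_scp_t could be reached, assuming it still fits inside struct scsi_pointer; cmd_scp() is an invented accessor name, not taken from the driver:

	/* Invented accessor: overlay dc390_cmd_scp_t on the SCp scratch space. */
	static inline dc390_cmd_scp_t *cmd_scp(struct scsi_cmnd *cmd)
	{
		return (dc390_cmd_scp_t *)&cmd->SCp;
	}

	/* e.g. cmd_scp(pcmd)->saved_dma_handle = dma_handle; */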
......@@ -165,6 +165,7 @@
* Removed now obsolete wd7000.h
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
......@@ -846,7 +847,7 @@ static inline int command_out(Adapter * host, unchar * cmd, int len)
static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
{
register Scb *scb, *p = NULL;
register unsigned long flags;
unsigned long flags;
register unsigned long timeout = jiffies + WAITnexttimeout;
register unsigned long now;
int i;
......@@ -898,7 +899,7 @@ static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
static inline void free_scb(Scb * scb)
{
register unsigned long flags;
unsigned long flags;
spin_lock_irqsave(&scbpool_lock, flags);
......@@ -936,7 +937,7 @@ static int mail_out(Adapter * host, Scb * scbptr)
*/
{
register int i, ogmb;
register unsigned long flags;
unsigned long flags;
unchar start_ogmb;
Mailbox *ogmbs = host->mb.ogmb;
int *next_ogmb = &(host->next_ogmb);
......
......@@ -4,7 +4,8 @@
* Flags for SCSI devices that need special treatment
*/
#define BLIST_NOLUN 0x001 /* Only scan LUN 0 */
#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning */
#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning,
deprecated: Use max_luns=N */
#define BLIST_BORKEN 0x004 /* Flag for broken handshaking */
#define BLIST_KEY 0x008 /* unlock by special command */
#define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */
......@@ -20,4 +21,7 @@
#define BLIST_MS_SKIP_PAGE_3F 0x4000 /* do not send ms page 0x3f */
#define BLIST_USE_10_BYTE_MS 0x8000 /* use 10 byte ms before 6 byte ms */
#define BLIST_MS_192_BYTES_FOR_3F 0x10000 /* 192 byte ms page 0x3f request */
#define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs
(if HBA supports more than 8 LUNs) */
#define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
#endif
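The two new flags are consumed through the same per-device flag table as the existing BLIST_* values (the static device list in drivers/scsi/scsi_devinfo.c pairs vendor/model strings with a flag mask). Illustrative entries only -- "ACME" and the model strings are invented and not part of this patch:

	/* SCSI-2 array with sparse, non-contiguous LUNs that nevertheless
	 * implements REPORT LUNS: force the REPORT LUNS scan.            */
	{"ACME", "SparseArray", NULL, BLIST_SPARSELUN | BLIST_REPORTLUN2},
	/* SCSI-3 device whose REPORT LUNS response is broken: skip it and
	 * fall back to a sequential LUN scan.                             */
	{"ACME", "OldDisk", NULL, BLIST_NOREPORTLUN},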