Commit d2b91c2d authored by James Bottomley

Update megaraid to version 2.20.3.1

From: 	Mukker, Atul <Atulm@lsil.com>

i.      Function reordering so that inline functions are defined before they
        are actually used. It is now mandatory for GCC 3.4.1 (current
        stable).

        Declare some heavy-weight functions to be non-inlined:
        megaraid_mbox_build_cmd, megaraid_mbox_runpendq,
        megaraid_mbox_prepare_pthru, megaraid_mbox_prepare_epthru,
        megaraid_busywait_mbox.

                - Andrew Morton <akpm@osdl.org>, 08.19.2004
                linux-scsi mailing list

        "Something else to clean up after inclusion: every instance of an
        inline function is actually rendered as a full function call,
because
        the function is always used before it is defined.  Atul, please
        re-arrange the code to eliminate the need for most (all) of the
        function prototypes at the top of each file, and define (not just
        declare with a prototype) each inline function before its first use"

                - Matt Domsch <Matt_Domsch@dell.com>, 07.27.2004
                linux-scsi mailing list
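
        For illustration only (sg_count() below is a made-up helper, not a
        driver symbol), the effect described in the quotes can be reproduced
        in a minimal stand-alone unit: with only a prototype ahead of the
        caller the compiler emits a real call, while defining the body
        before its first use allows it to be expanded inline.

        /*
         * Old layout: only "static inline int sg_count(unsigned int,
         * unsigned int);" would appear here, with the body after main().
         * New layout (shown): the full definition precedes the first use.
         */
        #include <stdio.h>

        static inline int sg_count(unsigned int nbytes, unsigned int seg_size)
        {
                /* number of segments needed to cover nbytes */
                return (nbytes + seg_size - 1) / seg_size;
        }

        int main(void)
        {
                printf("segments: %d\n", sg_count(4097, 4096)); /* prints 2 */
                return 0;
        }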


ii.     Display elapsed time (countdown) while waiting for FW to boot.
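
        The patch overwrites a fixed-width field in place with backspaces
        (see the mbox_post_sync_cmd hunk further down) instead of printing a
        growing row of dots. A user-space sketch of that idiom follows;
        fw_ready() and WAIT_SECONDS are illustrative stand-ins for the
        driver's firmware status check and MBOX_RESET_WAIT, not driver
        symbols.

        #include <stdio.h>
        #include <unistd.h>

        #define WAIT_SECONDS    10      /* the driver waits far longer */

        static int fw_ready(void)
        {
                return 0;       /* stand-in: the driver polls mbox->numstatus */
        }

        int main(void)
        {
                int i;

                printf("megaraid mailbox: wait for FW to boot [   ]");
                fflush(stdout);

                for (i = 0; !fw_ready() && i < WAIT_SECONDS; i++) {
                        /* back up over the 5-character field and rewrite it */
                        printf("\b\b\b\b\b[%03d]", WAIT_SECONDS - i);
                        fflush(stdout);
                        sleep(1);
                }

                printf("\b\b\b\b\b%s\n", fw_ready() ? "[ok] " : "[timeout]");
                return 0;
        }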

iii.    Module compilation reorder in Makefile so that unresolved symbols do
        not occur when driver is compiled non-modular.

                Patrick J. LoPresti <patl@users.sourceforge.net>, 8.22.2004
                linux-scsi mailing list
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent db339a00
Release Date : Tue Aug 24 09:43:35 EDT 2004 - Atul Mukker <atulm@lsil.com>
Current Version : 2.20.3.1 (scsi module), 2.20.2.0 (cmm module)
Older Version : 2.20.3.0 (scsi module), 2.20.2.0 (cmm module)
i. Function reordering so that inline functions are defined before they
are actually used. It is now mandatory for GCC 3.4.1 (current stable)
Declare some heavy-weight functions to be non-inlined,
megaraid_mbox_build_cmd, megaraid_mbox_runpendq,
megaraid_mbox_prepare_pthru, megaraid_mbox_prepare_epthru,
megaraid_busywait_mbox
- Andrew Morton <akpm@osdl.org>, 08.19.2004
linux-scsi mailing list
"Something else to clean up after inclusion: every instance of an
inline function is actually rendered as a full function call, because
the function is always used before it is defined. Atul, please
re-arrange the code to eliminate the need for most (all) of the
function prototypes at the top of each file, and define (not just
declare with a prototype) each inline function before its first use"
- Matt Domsch <Matt_Domsch@dell.com>, 07.27.2004
linux-scsi mailing list
ii. Display elapsed time (countdown) while waiting for FW to boot.
iii. Module compilation reorder in Makefile so that unresolved symbols do
not occur when driver is compiled non-modular.
Patrick J. LoPresti <patl@users.sourceforge.net>, 8.22.2004
linux-scsi mailing list
Release Date : Thu Aug 19 09:58:33 EDT 2004 - Atul Mukker <atulm@lsil.com>
Current Version : 2.20.3.0 (scsi module), 2.20.2.0 (cmm module)
Older Version : 2.20.2.0 (scsi module), 2.20.1.0 (cmm module)
......
-obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
+obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
@@ -9,8 +9,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
-* FILE : megaraid.c
-* Version : v2.20.2.0 (July 22 2004)
+* FILE : megaraid_mbox.c
+* Version : v2.20.3.1 (August 24 2004)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
@@ -82,24 +82,25 @@ static void megaraid_mbox_shutdown(struct device *);
static int megaraid_io_attach(adapter_t *);
static void megaraid_io_detach(adapter_t *);
+static int megaraid_init_mbox(adapter_t *);
+static void megaraid_fini_mbox(adapter_t *);
static int megaraid_alloc_cmd_packets(adapter_t *);
static void megaraid_free_cmd_packets(adapter_t *);
static int megaraid_mbox_setup_dma_pools(adapter_t *);
static void megaraid_mbox_teardown_dma_pools(adapter_t *);
-static int megaraid_init_mbox(adapter_t *);
-static void megaraid_fini_mbox(adapter_t *);
static int megaraid_abort_handler(struct scsi_cmnd *);
static int megaraid_reset_handler(struct scsi_cmnd *);
static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
+static int megaraid_busywait_mbox(mraid_device_t *);
static int megaraid_mbox_product_info(adapter_t *);
static int megaraid_mbox_extended_cdb(adapter_t *);
-static int megaraid_mbox_support_random_del(adapter_t *);
static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
+static int megaraid_mbox_support_random_del(adapter_t *);
static int megaraid_mbox_get_max_sg(adapter_t *);
static void megaraid_mbox_enum_raid_scsi(adapter_t *);
static void megaraid_mbox_flush_cache(adapter_t *);
@@ -109,27 +110,16 @@ static void megaraid_mbox_setup_device_map(adapter_t *);
static int megaraid_queue_command(struct scsi_cmnd *,
void (*)(struct scsi_cmnd *));
-static inline scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *,
-int *);
-static inline scb_t *megaraid_alloc_scb(adapter_t *, struct scsi_cmnd *);
-static inline void megaraid_dealloc_scb(adapter_t *, scb_t *);
-static inline void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
-struct scsi_cmnd *);
-static inline void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
-struct scsi_cmnd *);
-static inline int megaraid_mbox_mksgl(adapter_t *, scb_t *);
-static inline void megaraid_mbox_runpendq(adapter_t *, scb_t *);
-static inline int mbox_post_cmd(adapter_t *, scb_t *);
-static void megaraid_mbox_dpc(unsigned long);
-static inline void megaraid_mbox_sync_scb(adapter_t *, scb_t *);
+static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
+static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
+struct scsi_cmnd *);
+static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
+struct scsi_cmnd *);
static irqreturn_t megaraid_isr(int, void *, struct pt_regs *);
-static inline int megaraid_ack_sequence(adapter_t *);
-static inline int megaraid_busywait_mbox(mraid_device_t *);
+static void megaraid_mbox_dpc(unsigned long);
static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
@@ -771,7 +761,7 @@ megaraid_io_attach(adapter_t *adapter)
return -1;
}
-SCSIHOST2ADAP(host) = (caddr_t )adapter;
+SCSIHOST2ADAP(host) = (caddr_t)adapter;
adapter->host = host;
// export the parameters required by the mid-layer
@@ -1376,6 +1366,217 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
}
/**
* megaraid_alloc_scb - detach and return a scb from the free list
* @adapter : controller's soft state
*
* return the scb from the head of the free list. NULL if there are none
* available
**/
static inline scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{
struct list_head *head = &adapter->kscb_pool;
scb_t *scb = NULL;
unsigned long flags;
// detach scb from free pool
spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
if (list_empty(head)) {
spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
return NULL;
}
scb = list_entry(head->next, scb_t, list);
list_del_init(&scb->list);
spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
scb->state = SCB_ACTIVE;
scb->scp = scp;
scb->dma_type = MRAID_DMA_NONE;
return scb;
}
/**
* megaraid_dealloc_scb - return the scb to the free pool
* @adapter : controller's soft state
* @scb : scb to be freed
*
* return the scb back to the free list of scbs. The caller must 'flush' the
* SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc.
* NOTE NOTE: Make sure the scb is not on any list before calling this
* routine.
**/
static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{
unsigned long flags;
// put scb in the free pool
scb->state = SCB_FREE;
scb->scp = NULL;
spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
list_add(&scb->list, &adapter->kscb_pool);
spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
return;
}
/**
* megaraid_mbox_mksgl - make the scatter-gather list
* @adapter - controller's soft state
* @scb - scsi control block
*
* prepare the scatter-gather list
*/
static inline int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
struct scatterlist *sgl;
mbox_ccb_t *ccb;
struct page *page;
unsigned long offset;
struct scsi_cmnd *scp;
int sgcnt;
int i;
scp = scb->scp;
ccb = (mbox_ccb_t *)scb->ccb;
// no mapping required if no data to be transferred
if (!scp->request_buffer || !scp->request_bufflen)
return 0;
if (!scp->use_sg) { /* scatter-gather list not used */
page = virt_to_page(scp->request_buffer);
offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
scp->request_bufflen,
scb->dma_direction);
scb->dma_type = MRAID_DMA_WBUF;
/*
* We need to handle special 64-bit commands that need a
* minimum of 1 SG
*/
sgcnt = 1;
ccb->sgl64[0].address = ccb->buf_dma_h;
ccb->sgl64[0].length = scp->request_bufflen;
return sgcnt;
}
sgl = (struct scatterlist *)scp->request_buffer;
// The number of sg elements returned must not exceed our limit
sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
scb->dma_direction);
if (sgcnt > adapter->sglen) {
con_log(CL_ANN, (KERN_CRIT
"megaraid critical: too many sg elements:%d\n",
sgcnt));
BUG();
}
scb->dma_type = MRAID_DMA_WSG;
for (i = 0; i < sgcnt; i++, sgl++) {
ccb->sgl64[i].address = sg_dma_address(sgl);
ccb->sgl64[i].length = sg_dma_len(sgl);
}
// Return count of SG nodes
return sgcnt;
}
/**
* mbox_post_cmd - issue a mailbox command
* @adapter - controller's soft state
* @scb - command to be issued
*
* post the command to the controller if mailbox is availble.
*/
static inline int
mbox_post_cmd(adapter_t *adapter, scb_t *scb)
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
mbox64_t *mbox64;
mbox_t *mbox;
mbox_ccb_t *ccb;
unsigned long flags;
unsigned int i = 0;
ccb = (mbox_ccb_t *)scb->ccb;
mbox = raid_dev->mbox;
mbox64 = raid_dev->mbox64;
/*
* Check for busy mailbox. If it is, return failure - the caller
* should retry later.
*/
spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
if (unlikely(mbox->busy)) {
do {
udelay(1);
i++;
rmb();
} while(mbox->busy && (i < max_mbox_busy_wait));
if (mbox->busy) {
spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
return -1;
}
}
// Copy this command's mailbox data into "adapter's" mailbox
memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
mbox->cmdid = scb->sno;
adapter->outstanding_cmds++;
if (scb->dma_direction == PCI_DMA_TODEVICE) {
if (!scb->scp->use_sg) { // sg list not used
pci_dma_sync_single(adapter->pdev, ccb->buf_dma_h,
scb->scp->request_bufflen,
PCI_DMA_TODEVICE);
}
else {
pci_dma_sync_sg(adapter->pdev, scb->scp->request_buffer,
scb->scp->use_sg, PCI_DMA_TODEVICE);
}
}
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
wmb();
WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
return 0;
}
/**
* megaraid_queue_command - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
@@ -1434,7 +1635,7 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *))
* convert the command issued by mid-layer to format understood by megaraid
* firmware. We also complete certain command without sending them to firmware
*/
-static inline scb_t *
+static scb_t *
megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
{
mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
@@ -1803,63 +2004,76 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
-/**
-* megaraid_alloc_scb - detach and return a scb from the free list
-* @adapter : controller's soft state
-*
-* return the scb from the head of the free list. NULL if there are none
-* available
-**/
-static inline scb_t *
-megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
-{
-struct list_head *head = &adapter->kscb_pool;
-scb_t *scb = NULL;
-unsigned long flags;
-// detach scb from free pool
-spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
-if (list_empty(head)) {
-spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
-return NULL;
-}
-scb = list_entry(head->next, scb_t, list);
-list_del_init(&scb->list);
-spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
-scb->state = SCB_ACTIVE;
-scb->scp = scp;
-scb->dma_type = MRAID_DMA_NONE;
-return scb;
-}
-/**
-* megaraid_dealloc_scb - return the scb to the free pool
-* @adapter : controller's soft state
-* @scb : scb to be freed
-*
-* return the scb back to the free list of scbs. The caller must 'flush' the
-* SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc.
-* NOTE NOTE: Make sure the scb is not on any list before calling this
-* routine.
-**/
-static inline void
-megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
-{
-unsigned long flags;
-// put scb in the free pool
-scb->state = SCB_FREE;
-scb->scp = NULL;
-spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
-list_add(&scb->list, &adapter->kscb_pool);
-spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
-return;
-}
+/**
+* megaraid_mbox_runpendq - execute commands queued in the pending queue
+* @adapter : controller's soft state
+* @scb : SCB to be queued in the pending list
+*
+* scan the pending list for commands which are not yet issued and try to
+* post to the controller. The SCB can be a null pointer, which would indicate
+* no SCB to be queue, just try to execute the ones in the pending list.
+*
+* NOTE: We do not actually traverse the pending list. The SCBs are plucked
+* out from the head of the pending list. If it is successfully issued, the
+* next SCB is at the head now.
+*/
+static void
+megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
+{
+scb_t *scb;
+unsigned long flags;
+spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+if (scb_q) {
+scb_q->state = SCB_PENDQ;
+list_add_tail(&scb_q->list, &adapter->pend_list);
+}
+// if the adapter in not in quiescent mode, post the commands to FW
+if (adapter->quiescent) {
+spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+return;
+}
+while (!list_empty(&adapter->pend_list)) {
+ASSERT(spin_is_locked(PENDING_LIST_LOCK(adapter)));
+scb = list_entry(adapter->pend_list.next, scb_t, list);
+// remove the scb from the pending list and try to
+// issue. If we are unable to issue it, put back in
+// the pending list and return
+list_del_init(&scb->list);
+spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+// if mailbox was busy, return SCB back to pending
+// list. Make sure to add at the head, since that's
+// where it would have been removed from
+scb->state = SCB_ISSUED;
+if (mbox_post_cmd(adapter, scb) != 0) {
+spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+scb->state = SCB_PENDQ;
+list_add(&scb->list, &adapter->pend_list);
+spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+flags);
+return;
+}
+spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+}
+spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+return;
+}
@@ -1873,7 +2087,7 @@ megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
*
* prepare a command for the scsi physical devices
*/
-static inline void
+static void
megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
struct scsi_cmnd *scp)
{
@@ -1921,7 +2135,7 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
* prepare a command for the scsi physical devices. This rountine prepares
* commands for devices which can take extended CDBs (>10 bytes)
*/
-static inline void
+static void
megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
struct scsi_cmnd *scp)
{
@@ -1960,255 +2174,6 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
}
/**
* megaraid_mbox_mksgl - make the scatter-gather list
* @adapter - controller's soft state
* @scb - scsi control block
*
* prepare the scatter-gather list
*/
static inline int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
struct scatterlist *sgl;
mbox_ccb_t *ccb;
struct page *page;
unsigned long offset;
struct scsi_cmnd *scp;
int sgcnt;
int i;
scp = scb->scp;
ccb = (mbox_ccb_t *)scb->ccb;
// no mapping required if no data to be transferred
if (!scp->request_buffer || !scp->request_bufflen)
return 0;
if (!scp->use_sg) { /* scatter-gather list not used */
page = virt_to_page(scp->request_buffer);
offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
scp->request_bufflen,
scb->dma_direction);
scb->dma_type = MRAID_DMA_WBUF;
/*
* We need to handle special 64-bit commands that need a
* minimum of 1 SG
*/
sgcnt = 1;
ccb->sgl64[0].address = ccb->buf_dma_h;
ccb->sgl64[0].length = scp->request_bufflen;
return sgcnt;
}
sgl = (struct scatterlist *)scp->request_buffer;
// The number of sg elements returned must not exceed our limit
sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
scb->dma_direction);
if (sgcnt > adapter->sglen) {
con_log(CL_ANN, (KERN_CRIT
"megaraid critical: too many sg elements:%d\n",
sgcnt));
BUG();
}
scb->dma_type = MRAID_DMA_WSG;
for (i = 0; i < sgcnt; i++, sgl++) {
ccb->sgl64[i].address = sg_dma_address(sgl);
ccb->sgl64[i].length = sg_dma_len(sgl);
}
// Return count of SG nodes
return sgcnt;
}
/**
* megaraid_mbox_runpendq - execute commands queued in the pending queue
* @adapter : controller's soft state
* @scb : SCB to be queued in the pending list
*
* scan the pending list for commands which are not yet issued and try to
* post to the controller. The SCB can be a null pointer, which would indicate
* no SCB to be queue, just try to execute the ones in the pending list.
*
* NOTE: We do not actually traverse the pending list. The SCBs are plucked
* out from the head of the pending list. If it is successfully issued, the
* next SCB is at the head now.
*/
static inline void
megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
{
scb_t *scb;
unsigned long flags;
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
if (scb_q) {
scb_q->state = SCB_PENDQ;
list_add_tail(&scb_q->list, &adapter->pend_list);
}
// if the adapter in not in quiescent mode, post the commands to FW
if (adapter->quiescent) {
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
return;
}
while (!list_empty(&adapter->pend_list)) {
ASSERT(spin_is_locked(PENDING_LIST_LOCK(adapter)));
scb = list_entry(adapter->pend_list.next, scb_t, list);
// remove the scb from the pending list and try to
// issue. If we are unable to issue it, put back in
// the pending list and return
list_del_init(&scb->list);
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
// if mailbox was busy, return SCB back to pending
// list. Make sure to add at the head, since that's
// where it would have been removed from
scb->state = SCB_ISSUED;
if (mbox_post_cmd(adapter, scb) != 0) {
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
scb->state = SCB_PENDQ;
list_add(&scb->list, &adapter->pend_list);
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
flags);
return;
}
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
}
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
return;
}
/**
* mbox_post_cmd - issue a mailbox command
* @adapter - controller's soft state
* @scb - command to be issued
*
* post the command to the controller if mailbox is availble.
*/
static inline int
mbox_post_cmd(adapter_t *adapter, scb_t *scb)
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
mbox64_t *mbox64;
mbox_t *mbox;
mbox_ccb_t *ccb;
unsigned long flags;
unsigned int i = 0;
ccb = (mbox_ccb_t *)scb->ccb;
mbox = raid_dev->mbox;
mbox64 = raid_dev->mbox64;
/*
* Check for busy mailbox. If it is, return failure - the caller
* should retry later.
*/
spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
if (unlikely(mbox->busy)) {
do {
udelay(1);
i++;
rmb();
} while(mbox->busy && (i < max_mbox_busy_wait));
if (mbox->busy) {
spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
return -1;
}
}
// Copy this command's mailbox data into "adapter's" mailbox
memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
mbox->cmdid = scb->sno;
adapter->outstanding_cmds++;
if (scb->dma_direction == PCI_DMA_TODEVICE) {
if (!scb->scp->use_sg) { // sg list not used
pci_dma_sync_single(adapter->pdev, ccb->buf_dma_h,
scb->scp->request_bufflen,
PCI_DMA_TODEVICE);
}
else {
pci_dma_sync_sg(adapter->pdev, scb->scp->request_buffer,
scb->scp->use_sg, PCI_DMA_TODEVICE);
}
}
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
wmb();
WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
return 0;
}
/**
* megaraid_isr - isr for memory based mailbox based controllers
* @irq - irq
* @devp - pointer to our soft state
* @regs - unused
*
* Interrupt service routine for memory-mapped mailbox controllers.
*/
static irqreturn_t
megaraid_isr(int irq, void *devp, struct pt_regs *regs)
{
adapter_t *adapter = devp;
int handled;
handled = megaraid_ack_sequence(adapter);
/* Loop through any pending requests */
if (!adapter->quiescent) {
megaraid_mbox_runpendq(adapter, 0);
}
return IRQ_RETVAL(handled);
}
/**
* megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
* @adapter - controller's soft state
@@ -2324,6 +2289,79 @@ megaraid_ack_sequence(adapter_t *adapter)
}
/**
* megaraid_isr - isr for memory based mailbox based controllers
* @irq - irq
* @devp - pointer to our soft state
* @regs - unused
*
* Interrupt service routine for memory-mapped mailbox controllers.
*/
static irqreturn_t
megaraid_isr(int irq, void *devp, struct pt_regs *regs)
{
adapter_t *adapter = devp;
int handled;
handled = megaraid_ack_sequence(adapter);
/* Loop through any pending requests */
if (!adapter->quiescent) {
megaraid_mbox_runpendq(adapter, 0);
}
return IRQ_RETVAL(handled);
}
/**
* megaraid_mbox_sync_scb - sync kernel buffers
* @adapter : controller's soft state
* @scb : pointer to the resource packet
*
* DMA sync if required.
*/
static inline void
megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
{
mbox_ccb_t *ccb;
ccb = (mbox_ccb_t *)scb->ccb;
switch (scb->dma_type) {
case MRAID_DMA_WBUF:
if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
pci_dma_sync_single(adapter->pdev,
ccb->buf_dma_h,
scb->scp->request_bufflen,
PCI_DMA_FROMDEVICE);
}
pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
scb->scp->request_bufflen, scb->dma_direction);
break;
case MRAID_DMA_WSG:
if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
pci_dma_sync_sg(adapter->pdev,
scb->scp->request_buffer,
scb->scp->use_sg, PCI_DMA_FROMDEVICE);
}
pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
scb->scp->use_sg, scb->dma_direction);
break;
default:
break;
}
return;
}
/**
* megaraid_mbox_dpc - the tasklet to complete the commands from completed list
@@ -2549,55 +2587,6 @@ megaraid_mbox_dpc(unsigned long devp)
}
/**
* megaraid_mbox_sync_scb - sync kernel buffers
* @adapter : controller's soft state
* @scb : pointer to the resource packet
*
* DMA sync if required.
*/
static inline void
megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
{
mbox_ccb_t *ccb;
ccb = (mbox_ccb_t *)scb->ccb;
switch (scb->dma_type) {
case MRAID_DMA_WBUF:
if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
pci_dma_sync_single(adapter->pdev,
ccb->buf_dma_h,
scb->scp->request_bufflen,
PCI_DMA_FROMDEVICE);
}
pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
scb->scp->request_bufflen, scb->dma_direction);
break;
case MRAID_DMA_WSG:
if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
pci_dma_sync_sg(adapter->pdev,
scb->scp->request_buffer,
scb->scp->use_sg, PCI_DMA_FROMDEVICE);
}
pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
scb->scp->use_sg, scb->dma_direction);
break;
default:
break;
}
return;
}
/**
* megaraid_abort_handler - abort the scsi command
* @scp : command to be aborted
@@ -2895,7 +2884,7 @@ mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
mbox64_t *mbox64;
mbox_t *mbox;
uint8_t status;
-long i;
+int i;
mbox64 = raid_dev->mbox64;
@@ -2935,23 +2924,24 @@ mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
if (i == 1000) {
con_log(CL_ANN, (KERN_NOTICE
-"megaraid mailbox: wait for FW to boot."));
+"megaraid mailbox: wait for FW to boot "));
for (i = 0; (mbox->numstatus == 0xFF) &&
(i < MBOX_RESET_WAIT); i++) {
rmb();
-con_log(CL_ANN, ("."));
+con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
+MBOX_RESET_WAIT - i));
msleep(1000);
}
if (i == MBOX_RESET_WAIT) {
-con_log(CL_ANN, (KERN_WARNING
+con_log(CL_ANN, (
"\nmegaraid mailbox: status not available\n"));
return -1;
}
-con_log(CL_ANN, ("[ok]\n"));
+con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
}
}
@@ -3069,7 +3059,7 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
* wait until the controller's mailbox is available to accept more commands.
* wait for at most 1 second
*/
-static inline int
+static int
megaraid_busywait_mbox(mraid_device_t *raid_dev)
{
mbox_t *mbox = raid_dev->mbox;
......
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
-* FILE : megaraid.h
+* FILE : megaraid_mbox.h
*/
#ifndef _MEGARAID_H_
@@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
-#define MEGARAID_VERSION "2.20.3.0"
-#define MEGARAID_EXT_VERSION "(Release Date: Thu Aug 19 09:58:33 EDT 2004)"
+#define MEGARAID_VERSION "2.20.3.1"
+#define MEGARAID_EXT_VERSION "(Release Date: Tue Aug 24 09:43:35 EDT 2004)"
/*
......
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mm.c
-* Version : v2.20.1.0 (July 23 2004)
+* Version : v2.20.2.0 (August 19 2004)
*
* Common management module
*/
......