Commit 26145f7e authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
  [SCSI] NCR5380: Fix bugs and canonicalize irq handler usage
  [SCSI] zfcp: fix cleanup of dismissed error recovery actions
  [SCSI] zfcp: fix dismissal of error recovery actions
  [SCSI] qla1280: convert to use the data buffer accessors
  [SCSI] iscsi: return data transfer residual for data-out commands
  [SCSI] iscsi_tcp: fix potential lockup with write commands
  [SCSI] aacraid: fix security weakness
  [SCSI] aacraid: fix up le32 issues in BlinkLED
  [SCSI] aacraid: fix potential panic in thread stop
  [SCSI] aacraid: don't assign cpu_to_le32(constant) to u8
parents 8002cedc 1e641664
@@ -977,7 +977,9 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
 	debug_text_event(adapter->erp_dbf, 2, "a_adis");
 	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));

-	zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
+	erp_action->status |= ZFCP_STATUS_ERP_DISMISSED;
+	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING)
+		zfcp_erp_action_ready(erp_action);
 }

 int
@@ -1063,7 +1065,7 @@ zfcp_erp_thread(void *data)
 			       &adapter->status)) {

 		write_lock_irqsave(&adapter->erp_lock, flags);
-		next = adapter->erp_ready_head.prev;
+		next = adapter->erp_ready_head.next;
 		write_unlock_irqrestore(&adapter->erp_lock, flags);

 		if (next != &adapter->erp_ready_head) {
@@ -1153,15 +1155,13 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 	/*
 	 * check for dismissed status again to avoid follow-up actions,
-	 * failing of targets and so on for dismissed actions
-	 * we go through down() here because there has been an up()
+	 * failing of targets and so on for dismissed actions,
 	 */
-	retval = zfcp_erp_strategy_check_action(erp_action, retval);
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
+		retval = ZFCP_ERP_CONTINUES;

 	switch (retval) {
-	case ZFCP_ERP_DISMISSED:
-		/* leave since this action has ridden to its ancestors */
-		debug_text_event(adapter->erp_dbf, 6, "a_st_dis2");
-		goto unlock;
 	case ZFCP_ERP_NOMEM:
 		/* no memory to continue immediately, let it sleep */
 		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
@@ -3089,7 +3089,7 @@ zfcp_erp_action_enqueue(int action,
 	++adapter->erp_total_count;

 	/* finally put it into 'ready' queue and kick erp thread */
-	list_add(&erp_action->list, &adapter->erp_ready_head);
+	list_add_tail(&erp_action->list, &adapter->erp_ready_head);
 	up(&adapter->erp_ready_sem);
 	retval = 0;
  out:
......
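The zfcp hunks above make the dismissal path mark the action (ZFCP_STATUS_ERP_DISMISSED) and wake it only if it is currently running, and they switch the ready queue to list_add_tail() on enqueue with the erp thread taking .next on dequeue, i.e. plain FIFO handling through the standard list helpers. A minimal sketch of that queue discipline, using hypothetical types rather than zfcp's own structures (locking omitted):

#include <linux/list.h>

struct ready_item {
	struct list_head list;
	int action;
};

static LIST_HEAD(ready_head);

/* enqueue at the tail, as zfcp_erp_action_enqueue() now does */
static void ready_enqueue(struct ready_item *item)
{
	list_add_tail(&item->list, &ready_head);
}

/* dequeue from the head (.next), as zfcp_erp_thread() now does */
static struct ready_item *ready_dequeue(void)
{
	struct ready_item *item;

	if (list_empty(&ready_head))
		return NULL;
	item = list_entry(ready_head.next, struct ready_item, list);
	list_del(&item->list);
	return item;
}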
@@ -1339,10 +1339,10 @@ int aac_check_health(struct aac_dev * aac)
 	aif = (struct aac_aifcmd *)hw_fib->data;
 	aif->command = cpu_to_le32(AifCmdEventNotify);
 	aif->seqnum = cpu_to_le32(0xFFFFFFFF);
-	aif->data[0] = cpu_to_le32(AifEnExpEvent);
-	aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
-	aif->data[2] = cpu_to_le32(AifHighPriority);
-	aif->data[3] = cpu_to_le32(BlinkLED);
+	aif->data[0] = AifEnExpEvent;
+	aif->data[1] = AifExeFirmwarePanic;
+	aif->data[2] = AifHighPriority;
+	aif->data[3] = BlinkLED;

 	/*
 	 * Put the FIB onto the
......
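The aac_check_health hunk above stops wrapping the AIF data words in cpu_to_le32(); per the shortlog, the series also avoids assigning cpu_to_le32(constant) to a u8. The general rule being applied is that byte-swap helpers belong only on fields annotated __le32 (or __le16/__le64). A minimal sketch with a hypothetical structure, not aacraid's actual layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_event {
	__le32	command;	/* annotated little-endian: needs cpu_to_le32() */
	u8	priority;	/* a single byte has no byte order */
};

static void example_fill(struct example_event *ev, u32 cmd)
{
	ev->command  = cpu_to_le32(cmd);	/* correct: __le32 destination */
	ev->priority = 3;			/* correct: plain value into a u8 */
	/*
	 * Wrong, and what the series removes in spirit:
	 *	ev->priority = cpu_to_le32(3);
	 * sparse flags it, and on a big-endian host the assignment keeps the
	 * wrong byte of the swapped value.
	 */
}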
@@ -636,7 +636,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
 static int aac_cfg_ioctl(struct inode *inode, struct file *file,
 		unsigned int cmd, unsigned long arg)
 {
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 	return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
 }
@@ -691,7 +691,7 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 	return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
 }
@@ -950,7 +950,8 @@ static struct scsi_host_template aac_driver_template = {
 static void __aac_shutdown(struct aac_dev * aac)
 {
-	kthread_stop(aac->thread);
+	if (aac->aif_thread)
+		kthread_stop(aac->thread);
 	aac_send_shutdown(aac);
 	aac_adapter_disable_int(aac);
 	free_irq(aac->pdev->irq, aac);
......
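Two of the aacraid fixes above are small: the raw configuration ioctls now require CAP_SYS_RAWIO, and __aac_shutdown() only calls kthread_stop() when the AIF thread was actually started (the aif_thread check), since stopping a thread that never ran can oops on an unset task pointer, which is the "potential panic in thread stop" the shortlog refers to. A minimal sketch of that start/stop guard, with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct example_dev {
	struct task_struct *thread;
	int aif_enabled;
};

static int example_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static void example_start(struct example_dev *dev)
{
	if (dev->aif_enabled)
		dev->thread = kthread_run(example_worker, dev, "example_aif");
}

static void example_shutdown(struct example_dev *dev)
{
	/* only stop what was started */
	if (dev->aif_enabled && !IS_ERR_OR_NULL(dev->thread))
		kthread_stop(dev->thread);
}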
@@ -393,7 +393,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 #endif /* REAL_DMA */

-	NCR5380_intr(0, 0);
+	NCR5380_intr(irq, dummy);

 #if 0
 	/* To be sure the int is not masked */
@@ -458,7 +458,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
 #endif /* REAL_DMA */

-	NCR5380_intr(0, 0);
+	NCR5380_intr(irq, dummy);

 	return IRQ_HANDLED;
 }
@@ -684,7 +684,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 		 * interrupt after having cleared the pending flag for the DMA
 		 * interrupt. */
 		if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
-				 "SCSI NCR5380", scsi_tt_intr)) {
+				 "SCSI NCR5380", instance)) {
 			printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI);
 			scsi_unregister(atari_scsi_host);
 			atari_stram_free(atari_dma_buffer);
@@ -701,7 +701,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 				  IRQ_TYPE_PRIO, "Hades DMA emulator",
 				  hades_dma_emulator)) {
 			printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2);
-			free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
+			free_irq(IRQ_TT_MFP_SCSI, instance);
 			scsi_unregister(atari_scsi_host);
 			atari_stram_free(atari_dma_buffer);
 			atari_dma_buffer = 0;
@@ -761,7 +761,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 int atari_scsi_release(struct Scsi_Host *sh)
 {
 	if (IS_A_TT())
-		free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
+		free_irq(IRQ_TT_MFP_SCSI, sh);
 	if (atari_dma_buffer)
 		atari_stram_free(atari_dma_buffer);
 	return 1;
......
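Across the atari hunks above (and the dtc, generic NCR5380, mac, pas16, sun3 and t128 hunks below) the recurring change is the dev_id argument: request_irq() and free_irq() must be given the same cookie, and that cookie is what the handler receives as its dev_id; the canonical choice in this series is the Scsi_Host pointer. A minimal sketch of the convention, with hypothetical names:

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static irqreturn_t example_5380_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;	/* the cookie passed below */

	/* ... service the board behind 'instance' ... */
	(void)instance;
	return IRQ_HANDLED;
}

static int example_request(struct Scsi_Host *instance)
{
	/* the last argument is handed back to the handler as dev_id and must
	 * be repeated verbatim when the interrupt is freed */
	return request_irq(instance->irq, example_5380_intr, 0,
			   "example-5380", instance);
}

static void example_release(struct Scsi_Host *instance)
{
	free_irq(instance->irq, instance);
}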
@@ -277,7 +277,8 @@ static int __init dtc_detect(struct scsi_host_template * tpnt)
 		/* With interrupts enabled, it will sometimes hang when doing heavy
 		 * reads. So better not enable them until I finger it out. */
 		if (instance->irq != SCSI_IRQ_NONE)
-			if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, "dtc", instance)) {
+			if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+					"dtc", instance)) {
 				printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 				instance->irq = SCSI_IRQ_NONE;
 			}
@@ -459,7 +460,7 @@ static int dtc_release(struct Scsi_Host *shost)
 	NCR5380_local_declare();
 	NCR5380_setup(shost);
 	if (shost->irq)
-		free_irq(shost->irq, NULL);
+		free_irq(shost->irq, shost);
 	NCR5380_exit(shost);
 	if (shost->io_port && shost->n_io_port)
 		release_region(shost->io_port, shost->n_io_port);
......
@@ -460,7 +460,8 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
 		instance->irq = NCR5380_probe_irq(instance, 0xffff);

 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, generic_NCR5380_intr, IRQF_DISABLED, "NCR5380", instance)) {
+		if (request_irq(instance->irq, generic_NCR5380_intr,
+				IRQF_DISABLED, "NCR5380", instance)) {
 			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
 		}
@@ -513,7 +514,7 @@ int generic_NCR5380_release_resources(struct Scsi_Host *instance)
 	NCR5380_setup(instance);

 	if (instance->irq != SCSI_IRQ_NONE)
-		free_irq(instance->irq, NULL);
+		free_irq(instance->irq, instance);
 	NCR5380_exit(instance);

 #ifndef CONFIG_SCSI_G_NCR5380_MEM
......
...@@ -197,7 +197,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -197,7 +197,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (unlikely(!sc)) if (unlikely(!sc))
return; return;
tcp_ctask->xmstate = XMSTATE_IDLE; tcp_ctask->xmstate = XMSTATE_VALUE_IDLE;
tcp_ctask->r2t = NULL; tcp_ctask->r2t = NULL;
} }
...@@ -409,7 +409,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -409,7 +409,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
tcp_ctask->exp_datasn = r2tsn + 1; tcp_ctask->exp_datasn = r2tsn + 1;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT; set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
list_move_tail(&ctask->running, &conn->xmitqueue); list_move_tail(&ctask->running, &conn->xmitqueue);
scsi_queue_work(session->host, &conn->xmitwork); scsi_queue_work(session->host, &conn->xmitwork);
...@@ -1254,7 +1254,7 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, ...@@ -1254,7 +1254,7 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
tcp_ctask->xmstate |= XMSTATE_W_PAD; set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
} }
/** /**
...@@ -1269,7 +1269,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ...@@ -1269,7 +1269,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT; tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT;
} }
/** /**
...@@ -1283,10 +1283,10 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ...@@ -1283,10 +1283,10 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
* xmit. * xmit.
* *
* Management xmit state machine consists of these states: * Management xmit state machine consists of these states:
* XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header * XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header
* XMSTATE_IMM_HDR - PDU Header xmit in progress * XMSTATE_BIT_IMM_HDR - PDU Header xmit in progress
* XMSTATE_IMM_DATA - PDU Data xmit in progress * XMSTATE_BIT_IMM_DATA - PDU Data xmit in progress
* XMSTATE_IDLE - management PDU is done * XMSTATE_VALUE_IDLE - management PDU is done
**/ **/
static int static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
...@@ -1297,12 +1297,12 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) ...@@ -1297,12 +1297,12 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
conn->id, tcp_mtask->xmstate, mtask->itt); conn->id, tcp_mtask->xmstate, mtask->itt);
if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) { if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) {
iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
sizeof(struct iscsi_hdr)); sizeof(struct iscsi_hdr));
if (mtask->data_count) { if (mtask->data_count) {
tcp_mtask->xmstate |= XMSTATE_IMM_DATA; set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
iscsi_buf_init_iov(&tcp_mtask->sendbuf, iscsi_buf_init_iov(&tcp_mtask->sendbuf,
(char*)mtask->data, (char*)mtask->data,
mtask->data_count); mtask->data_count);
...@@ -1315,21 +1315,20 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) ...@@ -1315,21 +1315,20 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
(u8*)tcp_mtask->hdrext); (u8*)tcp_mtask->hdrext);
tcp_mtask->sent = 0; tcp_mtask->sent = 0;
tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT; clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate);
tcp_mtask->xmstate |= XMSTATE_IMM_HDR; set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
} }
if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) {
rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
mtask->data_count); mtask->data_count);
if (rc) if (rc)
return rc; return rc;
tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
} }
if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) {
BUG_ON(!mtask->data_count); BUG_ON(!mtask->data_count);
tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
/* FIXME: implement. /* FIXME: implement.
* Virtual buffer could be spreaded across multiple pages... * Virtual buffer could be spreaded across multiple pages...
*/ */
...@@ -1339,13 +1338,13 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) ...@@ -1339,13 +1338,13 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf, rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
&mtask->data_count, &tcp_mtask->sent); &mtask->data_count, &tcp_mtask->sent);
if (rc) { if (rc) {
tcp_mtask->xmstate |= XMSTATE_IMM_DATA; set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
return rc; return rc;
} }
} while (mtask->data_count); } while (mtask->data_count);
} }
BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE);
if (mtask->hdr->itt == RESERVED_ITT) { if (mtask->hdr->itt == RESERVED_ITT) {
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
...@@ -1365,7 +1364,7 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1365,7 +1364,7 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
int rc = 0; int rc = 0;
if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) { if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) {
tcp_ctask->sent = 0; tcp_ctask->sent = 0;
tcp_ctask->sg_count = 0; tcp_ctask->sg_count = 0;
tcp_ctask->exp_datasn = 0; tcp_ctask->exp_datasn = 0;
...@@ -1390,21 +1389,21 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1390,21 +1389,21 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (conn->hdrdgst_en) if (conn->hdrdgst_en)
iscsi_hdr_digest(conn, &tcp_ctask->headbuf, iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
(u8*)tcp_ctask->hdrext); (u8*)tcp_ctask->hdrext);
tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT; clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate);
tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT; set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
} }
if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) { if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) {
rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
if (rc) if (rc)
return rc; return rc;
tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT; clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
if (sc->sc_data_direction != DMA_TO_DEVICE) if (sc->sc_data_direction != DMA_TO_DEVICE)
return 0; return 0;
if (ctask->imm_count) { if (ctask->imm_count) {
tcp_ctask->xmstate |= XMSTATE_IMM_DATA; set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
iscsi_set_padding(tcp_ctask, ctask->imm_count); iscsi_set_padding(tcp_ctask, ctask->imm_count);
if (ctask->conn->datadgst_en) { if (ctask->conn->datadgst_en) {
...@@ -1414,9 +1413,10 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1414,9 +1413,10 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
} }
} }
if (ctask->unsol_count) if (ctask->unsol_count) {
tcp_ctask->xmstate |= set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
}
} }
return rc; return rc;
} }
...@@ -1428,25 +1428,25 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1428,25 +1428,25 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
int sent = 0, rc; int sent = 0, rc;
if (tcp_ctask->xmstate & XMSTATE_W_PAD) { if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) {
iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
tcp_ctask->pad_count); tcp_ctask->pad_count);
if (conn->datadgst_en) if (conn->datadgst_en)
crypto_hash_update(&tcp_conn->tx_hash, crypto_hash_update(&tcp_conn->tx_hash,
&tcp_ctask->sendbuf.sg, &tcp_ctask->sendbuf.sg,
tcp_ctask->sendbuf.sg.length); tcp_ctask->sendbuf.sg.length);
} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) } else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate))
return 0; return 0;
tcp_ctask->xmstate &= ~XMSTATE_W_PAD; clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD; clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
debug_scsi("sending %d pad bytes for itt 0x%x\n", debug_scsi("sending %d pad bytes for itt 0x%x\n",
tcp_ctask->pad_count, ctask->itt); tcp_ctask->pad_count, ctask->itt);
rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
&sent); &sent);
if (rc) { if (rc) {
debug_scsi("padding send failed %d\n", rc); debug_scsi("padding send failed %d\n", rc);
tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD; set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
} }
return rc; return rc;
} }
...@@ -1465,11 +1465,11 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, ...@@ -1465,11 +1465,11 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
tcp_ctask = ctask->dd_data; tcp_ctask = ctask->dd_data;
tcp_conn = conn->dd_data; tcp_conn = conn->dd_data;
if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { if (!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) {
crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest); crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
iscsi_buf_init_iov(buf, (char*)digest, 4); iscsi_buf_init_iov(buf, (char*)digest, 4);
} }
tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
if (!rc) if (!rc)
...@@ -1478,7 +1478,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, ...@@ -1478,7 +1478,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
else { else {
debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
*digest, ctask->itt); *digest, ctask->itt);
tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST; set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
} }
return rc; return rc;
} }
...@@ -1526,8 +1526,8 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1526,8 +1526,8 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_data_task *dtask; struct iscsi_data_task *dtask;
int rc; int rc;
tcp_ctask->xmstate |= XMSTATE_UNS_DATA; set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) {
dtask = &tcp_ctask->unsol_dtask; dtask = &tcp_ctask->unsol_dtask;
iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
...@@ -1537,14 +1537,14 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1537,14 +1537,14 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_hdr_digest(conn, &tcp_ctask->headbuf, iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
(u8*)dtask->hdrext); (u8*)dtask->hdrext);
tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
iscsi_set_padding(tcp_ctask, ctask->data_count); iscsi_set_padding(tcp_ctask, ctask->data_count);
} }
rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
if (rc) { if (rc) {
tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
tcp_ctask->xmstate |= XMSTATE_UNS_HDR; set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
return rc; return rc;
} }
...@@ -1565,16 +1565,15 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1565,16 +1565,15 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
int rc; int rc;
if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) {
BUG_ON(!ctask->unsol_count); BUG_ON(!ctask->unsol_count);
tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
send_hdr: send_hdr:
rc = iscsi_send_unsol_hdr(conn, ctask); rc = iscsi_send_unsol_hdr(conn, ctask);
if (rc) if (rc)
return rc; return rc;
} }
if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) {
struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
int start = tcp_ctask->sent; int start = tcp_ctask->sent;
...@@ -1584,14 +1583,14 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1584,14 +1583,14 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
ctask->unsol_count -= tcp_ctask->sent - start; ctask->unsol_count -= tcp_ctask->sent - start;
if (rc) if (rc)
return rc; return rc;
tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
/* /*
* Done with the Data-Out. Next, check if we need * Done with the Data-Out. Next, check if we need
* to send another unsolicited Data-Out. * to send another unsolicited Data-Out.
*/ */
if (ctask->unsol_count) { if (ctask->unsol_count) {
debug_scsi("sending more uns\n"); debug_scsi("sending more uns\n");
tcp_ctask->xmstate |= XMSTATE_UNS_INIT; set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
goto send_hdr; goto send_hdr;
} }
} }
...@@ -1607,7 +1606,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, ...@@ -1607,7 +1606,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
struct iscsi_data_task *dtask; struct iscsi_data_task *dtask;
int left, rc; int left, rc;
if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) { if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) {
if (!tcp_ctask->r2t) { if (!tcp_ctask->r2t) {
spin_lock_bh(&session->lock); spin_lock_bh(&session->lock);
__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
...@@ -1621,19 +1620,19 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, ...@@ -1621,19 +1620,19 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
if (conn->hdrdgst_en) if (conn->hdrdgst_en)
iscsi_hdr_digest(conn, &r2t->headbuf, iscsi_hdr_digest(conn, &r2t->headbuf,
(u8*)dtask->hdrext); (u8*)dtask->hdrext);
tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT; clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
tcp_ctask->xmstate |= XMSTATE_SOL_HDR; set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
} }
if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) {
r2t = tcp_ctask->r2t; r2t = tcp_ctask->r2t;
dtask = &r2t->dtask; dtask = &r2t->dtask;
rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
if (rc) if (rc)
return rc; return rc;
tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
tcp_ctask->xmstate |= XMSTATE_SOL_DATA; set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
if (conn->datadgst_en) { if (conn->datadgst_en) {
iscsi_data_digest_init(conn->dd_data, tcp_ctask); iscsi_data_digest_init(conn->dd_data, tcp_ctask);
...@@ -1646,7 +1645,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, ...@@ -1646,7 +1645,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
r2t->sent); r2t->sent);
} }
if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) {
r2t = tcp_ctask->r2t; r2t = tcp_ctask->r2t;
dtask = &r2t->dtask; dtask = &r2t->dtask;
...@@ -1655,7 +1654,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, ...@@ -1655,7 +1654,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
&dtask->digestbuf, &dtask->digest); &dtask->digestbuf, &dtask->digest);
if (rc) if (rc)
return rc; return rc;
tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
/* /*
* Done with this Data-Out. Next, check if we have * Done with this Data-Out. Next, check if we have
...@@ -1700,32 +1699,32 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, ...@@ -1700,32 +1699,32 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
* xmit stages. * xmit stages.
* *
*iscsi_send_cmd_hdr() *iscsi_send_cmd_hdr()
* XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate
* Header Digest * Header Digest
* XMSTATE_CMD_HDR_XMIT - Transmit header in progress * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress
* *
*iscsi_send_padding *iscsi_send_padding
* XMSTATE_W_PAD - Prepare and send pading * XMSTATE_BIT_W_PAD - Prepare and send pading
* XMSTATE_W_RESEND_PAD - retry send pading * XMSTATE_BIT_W_RESEND_PAD - retry send pading
* *
*iscsi_send_digest *iscsi_send_digest
* XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
* XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest
* *
*iscsi_send_unsol_hdr *iscsi_send_unsol_hdr
* XMSTATE_UNS_INIT - prepare un-solicit data header and digest * XMSTATE_BIT_UNS_INIT - prepare un-solicit data header and digest
* XMSTATE_UNS_HDR - send un-solicit header * XMSTATE_BIT_UNS_HDR - send un-solicit header
* *
*iscsi_send_unsol_pdu *iscsi_send_unsol_pdu
* XMSTATE_UNS_DATA - send un-solicit data in progress * XMSTATE_BIT_UNS_DATA - send un-solicit data in progress
* *
*iscsi_send_sol_pdu *iscsi_send_sol_pdu
* XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize * XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize
* XMSTATE_SOL_HDR - send solicit header * XMSTATE_BIT_SOL_HDR - send solicit header
* XMSTATE_SOL_DATA - send solicit data * XMSTATE_BIT_SOL_DATA - send solicit data
* *
*iscsi_tcp_ctask_xmit *iscsi_tcp_ctask_xmit
* XMSTATE_IMM_DATA - xmit managment data (??) * XMSTATE_BIT_IMM_DATA - xmit managment data (??)
**/ **/
static int static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
...@@ -1742,13 +1741,13 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) ...@@ -1742,13 +1741,13 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (ctask->sc->sc_data_direction != DMA_TO_DEVICE) if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
return 0; return 0;
if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) {
rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
&tcp_ctask->sent, &ctask->imm_count, &tcp_ctask->sent, &ctask->imm_count,
&tcp_ctask->immbuf, &tcp_ctask->immdigest); &tcp_ctask->immbuf, &tcp_ctask->immdigest);
if (rc) if (rc)
return rc; return rc;
tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
} }
rc = iscsi_send_unsol_pdu(conn, ctask); rc = iscsi_send_unsol_pdu(conn, ctask);
...@@ -1981,7 +1980,7 @@ static void ...@@ -1981,7 +1980,7 @@ static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{ {
struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT; tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT;
} }
static int static int
......
@@ -32,21 +32,21 @@
 #define IN_PROGRESS_PAD_RECV		0x4

 /* xmit state machine */
-#define XMSTATE_IDLE			0x0
-#define XMSTATE_CMD_HDR_INIT		0x1
-#define XMSTATE_CMD_HDR_XMIT		0x2
-#define XMSTATE_IMM_HDR			0x4
-#define XMSTATE_IMM_DATA		0x8
-#define XMSTATE_UNS_INIT		0x10
-#define XMSTATE_UNS_HDR			0x20
-#define XMSTATE_UNS_DATA		0x40
-#define XMSTATE_SOL_HDR			0x80
-#define XMSTATE_SOL_DATA		0x100
-#define XMSTATE_W_PAD			0x200
-#define XMSTATE_W_RESEND_PAD		0x400
-#define XMSTATE_W_RESEND_DATA_DIGEST	0x800
-#define XMSTATE_IMM_HDR_INIT		0x1000
-#define XMSTATE_SOL_HDR_INIT		0x2000
+#define XMSTATE_VALUE_IDLE			0
+#define XMSTATE_BIT_CMD_HDR_INIT		0
+#define XMSTATE_BIT_CMD_HDR_XMIT		1
+#define XMSTATE_BIT_IMM_HDR			2
+#define XMSTATE_BIT_IMM_DATA			3
+#define XMSTATE_BIT_UNS_INIT			4
+#define XMSTATE_BIT_UNS_HDR			5
+#define XMSTATE_BIT_UNS_DATA			6
+#define XMSTATE_BIT_SOL_HDR			7
+#define XMSTATE_BIT_SOL_DATA			8
+#define XMSTATE_BIT_W_PAD			9
+#define XMSTATE_BIT_W_RESEND_PAD		10
+#define XMSTATE_BIT_W_RESEND_DATA_DIGEST	11
+#define XMSTATE_BIT_IMM_HDR_INIT		12
+#define XMSTATE_BIT_SOL_HDR_INIT		13

 #define ISCSI_PAD_LEN			4
 #define ISCSI_SG_TABLESIZE		SG_ALL
@@ -122,7 +122,7 @@ struct iscsi_data_task {
 struct iscsi_tcp_mgmt_task {
 	struct iscsi_hdr	hdr;
 	char			hdrext[sizeof(__u32)]; /* Header-Digest */
-	int			xmstate;	/* mgmt xmit progress */
+	unsigned long		xmstate;	/* mgmt xmit progress */
 	struct iscsi_buf	headbuf;	/* header buffer */
 	struct iscsi_buf	sendbuf;	/* in progress buffer */
 	int			sent;
@@ -150,7 +150,7 @@ struct iscsi_tcp_cmd_task {
 	int			pad_count;	/* padded bytes */
 	struct iscsi_buf	headbuf;	/* header buf (xmit) */
 	struct iscsi_buf	sendbuf;	/* in progress buffer*/
-	int			xmstate;	/* xmit xtate machine */
+	unsigned long		xmstate;	/* xmit xtate machine */
 	int			sent;
 	struct scatterlist	*sg;		/* per-cmd SG list */
 	struct scatterlist	*bad_sg;	/* assert statement */
......
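The iscsi_tcp changes in this merge replace a plain int flag word, updated with |= and &= masks, by an unsigned long driven through the atomic bit helpers: the header above renames the masks to bit numbers and widens xmstate, and the iscsi_tcp.c hunks switch the call sites to set_bit()/test_bit()/clear_bit(), with test_and_clear_bit() where a flag is consumed in one step. A minimal sketch of the pattern (not the driver's code), reusing one of the bit numbers defined above:

#include <linux/bitops.h>

struct example_xmit {
	unsigned long xmstate;		/* was: int xmstate with 0x.. masks */
};

static void example_mark_imm_data(struct example_xmit *x)
{
	/* atomic read-modify-write; a plain "x->xmstate |= 0x8" done from two
	 * contexts can lose one of the updates */
	set_bit(XMSTATE_BIT_IMM_DATA, &x->xmstate);
}

static int example_imm_data_pending(struct example_xmit *x)
{
	return test_bit(XMSTATE_BIT_IMM_DATA, &x->xmstate);
}

static int example_take_imm_data(struct example_xmit *x)
{
	/* check and consume the flag in a single atomic step */
	return test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &x->xmstate);
}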
@@ -291,9 +291,6 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			   min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
 	}

-	if (sc->sc_data_direction == DMA_TO_DEVICE)
-		goto out;
-
 	if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
 		int res_count = be32_to_cpu(rhdr->residual_count);
......
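The libiscsi hunk above drops the early return for DMA_TO_DEVICE commands, so underflow residuals reported by the target are now propagated for Data-Out (write) commands as well, which is the "return data transfer residual for data-out commands" entry in the shortlog. A minimal sketch of how a residual reaches the midlayer (hypothetical helper, not libiscsi's code):

#include <scsi/scsi_cmnd.h>

static void example_report_underflow(struct scsi_cmnd *sc, unsigned int res_count)
{
	/* resid counts the bytes that were NOT transferred */
	if (res_count && res_count <= scsi_bufflen(sc))
		scsi_set_resid(sc, res_count);
	else
		scsi_set_resid(sc, 0);
}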
@@ -303,7 +303,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
 	if (instance->irq != SCSI_IRQ_NONE)
 		if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
 				"ncr5380", instance)) {
 			printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
 			       instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
@@ -326,7 +326,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
 int macscsi_release (struct Scsi_Host *shpnt)
 {
 	if (shpnt->irq != SCSI_IRQ_NONE)
-		free_irq (shpnt->irq, NCR5380_intr);
+		free_irq(shpnt->irq, shpnt);
 	NCR5380_exit(shpnt);
 	return 0;
......
@@ -453,7 +453,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
 		instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);

 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) {
+		if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+				"pas16", instance)) {
 			printk("scsi%d : IRQ%d not free, interrupts disabled\n",
 			       instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
@@ -604,7 +605,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
 static int pas16_release(struct Scsi_Host *shost)
 {
 	if (shost->irq)
-		free_irq(shost->irq, NULL);
+		free_irq(shost->irq, shost);
 	NCR5380_exit(shost);
 	if (shost->dma_channel != 0xff)
 		free_dma(shost->dma_channel);
......
@@ -1310,14 +1310,7 @@ qla1280_done(struct scsi_qla_host *ha)
 		}

 		/* Release memory used for this I/O */
-		if (cmd->use_sg) {
-			pci_unmap_sg(ha->pdev, cmd->request_buffer,
-					cmd->use_sg, cmd->sc_data_direction);
-		} else if (cmd->request_bufflen) {
-			pci_unmap_single(ha->pdev, sp->saved_dma_handle,
-					cmd->request_bufflen,
-					cmd->sc_data_direction);
-		}
+		scsi_dma_unmap(cmd);

 		/* Call the mid-level driver interrupt handler */
 		CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
@@ -1406,14 +1399,14 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
 		break;

 	case CS_DATA_UNDERRUN:
-		if ((cp->request_bufflen - residual_length) <
+		if ((scsi_bufflen(cp) - residual_length) <
 		    cp->underflow) {
 			printk(KERN_WARNING
 			       "scsi: Underflow detected - retrying "
 			       "command.\n");
 			host_status = DID_ERROR;
 		} else {
-			cp->resid = residual_length;
+			scsi_set_resid(cp, residual_length);
 			host_status = DID_OK;
 		}
 		break;
@@ -2775,33 +2768,28 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	cmd_a64_entry_t *pkt;
-	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	dma_addr_t dma_handle;
 	int status = 0;
 	int cnt;
 	int req_cnt;
-	u16 seg_cnt;
+	int seg_cnt;
 	u8 dir;

 	ENTER("qla1280_64bit_start_scsi:");

 	/* Calculate number of entries and segments required. */
 	req_cnt = 1;
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				     cmd->sc_data_direction);
+	seg_cnt = scsi_dma_map(cmd);
+	if (seg_cnt > 0) {
 		if (seg_cnt > 2) {
 			req_cnt += (seg_cnt - 2) / 5;
 			if ((seg_cnt - 2) % 5)
 				req_cnt++;
 		}
-	} else if (cmd->request_bufflen) {	/* If data transfer. */
-		seg_cnt = 1;
-	} else {
-		seg_cnt = 0;
+	} else if (seg_cnt < 0) {
+		status = 1;
+		goto out;
 	}

 	if ((req_cnt + 2) >= ha->req_q_cnt) {
...@@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ...@@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
* Load data segments. * Load data segments.
*/ */
if (seg_cnt) { /* If data transfer. */ if (seg_cnt) { /* If data transfer. */
struct scatterlist *sg, *s;
int remseg = seg_cnt; int remseg = seg_cnt;
sg = scsi_sglist(cmd);
/* Setup packet address segment pointer. */ /* Setup packet address segment pointer. */
dword_ptr = (u32 *)&pkt->dseg_0_address; dword_ptr = (u32 *)&pkt->dseg_0_address;
if (cmd->use_sg) { /* If scatter gather */ /* Load command entry data segments. */
/* Load command entry data segments. */ for_each_sg(sg, s, seg_cnt, cnt) {
for_each_sg(sg, s, seg_cnt, cnt) { if (cnt == 2)
if (cnt == 2) break;
dma_handle = sg_dma_address(s);
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
sn_pci_set_vchan(ha->pdev,
(unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(dma_handle));
*dword_ptr++ =
cpu_to_le32(pci_dma_hi32(dma_handle));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(pci_dma_hi32(dma_handle)),
cpu_to_le32(pci_dma_lo32(dma_handle)),
cpu_to_le32(sg_dma_len(sg_next(s))));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
"command packet data - b %i, t %i, l %i \n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
/*
* Build continuation packets.
*/
dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
"remains\n", seg_cnt);
while (remseg > 0) {
/* Update sg start */
sg = s;
/* Adjust ring index. */
ha->req_ring_index++;
if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
ha->req_ring_index = 0;
ha->request_ring_ptr =
ha->request_ring;
} else
ha->request_ring_ptr++;
pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
/* Zero out packet. */
memset(pkt, 0, REQUEST_ENTRY_SIZE);
/* Load packet defaults. */
((struct cont_a64_entry *) pkt)->entry_type =
CONTINUE_A64_TYPE;
((struct cont_a64_entry *) pkt)->entry_count = 1;
((struct cont_a64_entry *) pkt)->sys_define =
(uint8_t)ha->req_ring_index;
/* Setup packet address segment pointer. */
dword_ptr =
(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
/* Load continuation entry data segments. */
for_each_sg(sg, s, remseg, cnt) {
if (cnt == 5)
break; break;
dma_handle = sg_dma_address(s); dma_handle = sg_dma_address(s);
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel) if (ha->flags.use_pci_vchannel)
sn_pci_set_vchan(ha->pdev, sn_pci_set_vchan(ha->pdev,
(unsigned long *)&dma_handle, (unsigned long *)&dma_handle,
SCSI_BUS_32(cmd)); SCSI_BUS_32(cmd));
#endif #endif
*dword_ptr++ = *dword_ptr++ =
cpu_to_le32(pci_dma_lo32(dma_handle)); cpu_to_le32(pci_dma_lo32(dma_handle));
*dword_ptr++ = *dword_ptr++ =
cpu_to_le32(pci_dma_hi32(dma_handle)); cpu_to_le32(pci_dma_hi32(dma_handle));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s)); *dword_ptr++ =
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(pci_dma_hi32(dma_handle)), cpu_to_le32(pci_dma_hi32(dma_handle)),
cpu_to_le32(pci_dma_lo32(dma_handle)), cpu_to_le32(pci_dma_lo32(dma_handle)),
cpu_to_le32(sg_dma_len(sg_next(s)))); cpu_to_le32(sg_dma_len(s)));
remseg--;
} }
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " remseg -= cnt;
"command packet data - b %i, t %i, l %i \n", dprintk(5, "qla1280_64bit_start_scsi: "
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), "continuation packet data - b %i, t "
SCSI_LUN_32(cmd)); "%i, l %i \n", SCSI_BUS_32(cmd),
qla1280_dump_buffer(5, (char *)pkt, SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
REQUEST_ENTRY_SIZE);
/*
* Build continuation packets.
*/
dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
"remains\n", seg_cnt);
while (remseg > 0) {
/* Update sg start */
sg = s;
/* Adjust ring index. */
ha->req_ring_index++;
if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
ha->req_ring_index = 0;
ha->request_ring_ptr =
ha->request_ring;
} else
ha->request_ring_ptr++;
pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
/* Zero out packet. */
memset(pkt, 0, REQUEST_ENTRY_SIZE);
/* Load packet defaults. */
((struct cont_a64_entry *) pkt)->entry_type =
CONTINUE_A64_TYPE;
((struct cont_a64_entry *) pkt)->entry_count = 1;
((struct cont_a64_entry *) pkt)->sys_define =
(uint8_t)ha->req_ring_index;
/* Setup packet address segment pointer. */
dword_ptr =
(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
/* Load continuation entry data segments. */
for_each_sg(sg, s, remseg, cnt) {
if (cnt == 5)
break;
dma_handle = sg_dma_address(s);
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
sn_pci_set_vchan(ha->pdev,
(unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(dma_handle));
*dword_ptr++ =
cpu_to_le32(pci_dma_hi32(dma_handle));
*dword_ptr++ =
cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(pci_dma_hi32(dma_handle)),
cpu_to_le32(pci_dma_lo32(dma_handle)),
cpu_to_le32(sg_dma_len(s)));
}
remseg -= cnt;
dprintk(5, "qla1280_64bit_start_scsi: "
"continuation packet data - b %i, t "
"%i, l %i \n", SCSI_BUS_32(cmd),
SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
}
} else { /* No scatter gather data transfer */
dma_handle = pci_map_single(ha->pdev,
cmd->request_buffer,
cmd->request_bufflen,
cmd->sc_data_direction);
sp->saved_dma_handle = dma_handle;
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ha->flags.use_pci_vchannel)
sn_pci_set_vchan(ha->pdev,
(unsigned long *)&dma_handle,
SCSI_BUS_32(cmd));
#endif
*dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
*dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle));
*dword_ptr = cpu_to_le32(cmd->request_bufflen);
dprintk(5, "qla1280_64bit_start_scsi: No scatter/"
"gather command packet data - b %i, t %i, "
"l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt, qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE); REQUEST_ENTRY_SIZE);
} }
@@ -3068,12 +3036,11 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	struct cmd_entry *pkt;
-	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	int status = 0;
 	int cnt;
 	int req_cnt;
-	uint16_t seg_cnt;
+	int seg_cnt;
 	dma_addr_t dma_handle;
 	u8 dir;
@@ -3083,18 +3050,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 		cmd->cmnd[0]);

 	/* Calculate number of entries and segments required. */
-	req_cnt = 1;
-	if (cmd->use_sg) {
-		/*
-		 * We must build an SG list in adapter format, as the kernel's
-		 * SG list cannot be used directly because of data field size
-		 * (__alpha__) differences and the kernel SG list uses virtual
-		 * addresses where we need physical addresses.
-		 */
-		sg = (struct scatterlist *) cmd->request_buffer;
-		seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				     cmd->sc_data_direction);
+	seg_cnt = scsi_dma_map(cmd);
+	if (seg_cnt) {
 		/*
 		 * if greater than four sg entries then we need to allocate
 		 * continuation entries
@@ -3106,14 +3063,9 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 		}
 		dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
 			cmd, seg_cnt, req_cnt);
-	} else if (cmd->request_bufflen) {	/* If data transfer. */
-		dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n",
-			SCSI_TCN_32(cmd), cmd, cmd->request_bufflen,
-			cmd->cmnd[0]);
-		seg_cnt = 1;
-	} else {
-		/* dprintk(1, "No data transfer \n"); */
-		seg_cnt = 0;
+	} else if (seg_cnt < 0) {
+		status = 1;
+		goto out;
 	}

 	if ((req_cnt + 2) >= ha->req_q_cnt) {
...@@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ...@@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
* Load data segments. * Load data segments.
*/ */
if (seg_cnt) { if (seg_cnt) {
struct scatterlist *sg, *s;
int remseg = seg_cnt; int remseg = seg_cnt;
sg = scsi_sglist(cmd);
/* Setup packet address segment pointer. */ /* Setup packet address segment pointer. */
dword_ptr = &pkt->dseg_0_address; dword_ptr = &pkt->dseg_0_address;
if (cmd->use_sg) { /* If scatter gather */ dprintk(3, "Building S/G data segments..\n");
dprintk(3, "Building S/G data segments..\n"); qla1280_dump_buffer(1, (char *)sg, 4 * 16);
qla1280_dump_buffer(1, (char *)sg, 4 * 16);
/* Load command entry data segments. */
for_each_sg(sg, s, seg_cnt, cnt) {
if (cnt == 4)
break;
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
(pci_dma_lo32(sg_dma_address(s))),
(sg_dma_len(s)));
remseg--;
}
/*
* Build continuation packets.
*/
dprintk(3, "S/G Building Continuation"
"...seg_cnt=0x%x remains\n", seg_cnt);
while (remseg > 0) {
/* Continue from end point */
sg = s;
/* Adjust ring index. */
ha->req_ring_index++;
if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
ha->req_ring_index = 0;
ha->request_ring_ptr =
ha->request_ring;
} else
ha->request_ring_ptr++;
pkt = (struct cmd_entry *)ha->request_ring_ptr;
/* Zero out packet. */
memset(pkt, 0, REQUEST_ENTRY_SIZE);
/* Load packet defaults. */
((struct cont_entry *) pkt)->
entry_type = CONTINUE_TYPE;
((struct cont_entry *) pkt)->entry_count = 1;
/* Load command entry data segments. */ ((struct cont_entry *) pkt)->sys_define =
for_each_sg(sg, s, seg_cnt, cnt) { (uint8_t) ha->req_ring_index;
if (cnt == 4)
/* Setup packet address segment pointer. */
dword_ptr =
&((struct cont_entry *) pkt)->dseg_0_address;
/* Load continuation entry data segments. */
for_each_sg(sg, s, remseg, cnt) {
if (cnt == 7)
break; break;
*dword_ptr++ = *dword_ptr++ =
cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(s)); *dword_ptr++ =
dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", cpu_to_le32(sg_dma_len(s));
(pci_dma_lo32(sg_dma_address(s))), dprintk(1,
(sg_dma_len(s))); "S/G Segment Cont. phys_addr=0x%x, "
remseg--; "len=0x%x\n",
} cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
/* cpu_to_le32(sg_dma_len(s)));
* Build continuation packets.
*/
dprintk(3, "S/G Building Continuation"
"...seg_cnt=0x%x remains\n", seg_cnt);
while (remseg > 0) {
/* Continue from end point */
sg = s;
/* Adjust ring index. */
ha->req_ring_index++;
if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
ha->req_ring_index = 0;
ha->request_ring_ptr =
ha->request_ring;
} else
ha->request_ring_ptr++;
pkt = (struct cmd_entry *)ha->request_ring_ptr;
/* Zero out packet. */
memset(pkt, 0, REQUEST_ENTRY_SIZE);
/* Load packet defaults. */
((struct cont_entry *) pkt)->
entry_type = CONTINUE_TYPE;
((struct cont_entry *) pkt)->entry_count = 1;
((struct cont_entry *) pkt)->sys_define =
(uint8_t) ha->req_ring_index;
/* Setup packet address segment pointer. */
dword_ptr =
&((struct cont_entry *) pkt)->dseg_0_address;
/* Load continuation entry data segments. */
for_each_sg(sg, s, remseg, cnt) {
if (cnt == 7)
break;
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
*dword_ptr++ =
cpu_to_le32(sg_dma_len(s));
dprintk(1,
"S/G Segment Cont. phys_addr=0x%x, "
"len=0x%x\n",
cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
cpu_to_le32(sg_dma_len(s)));
}
remseg -= cnt;
dprintk(5, "qla1280_32bit_start_scsi: "
"continuation packet data - "
"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
} }
} else { /* No S/G data transfer */ remseg -= cnt;
dma_handle = pci_map_single(ha->pdev, dprintk(5, "qla1280_32bit_start_scsi: "
cmd->request_buffer, "continuation packet data - "
cmd->request_bufflen, "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
cmd->sc_data_direction); SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
sp->saved_dma_handle = dma_handle; qla1280_dump_buffer(5, (char *)pkt,
REQUEST_ENTRY_SIZE);
*dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
*dword_ptr = cpu_to_le32(cmd->request_bufflen);
} }
} else { /* No data transfer at all */ } else { /* No data transfer at all */
dprintk(5, "qla1280_32bit_start_scsi: No data, command " dprintk(5, "qla1280_32bit_start_scsi: No data, command "
@@ -4086,9 +4031,9 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
 	for (i = 0; i < cmd->cmd_len; i++) {
 		printk("0x%02x ", cmd->cmnd[i]);
 	}
-	printk("  seg_cnt =%d\n", cmd->use_sg);
+	printk("  seg_cnt =%d\n", scsi_sg_count(cmd));
 	printk("  request buffer=0x%p, request buffer len=0x%x\n",
-	       cmd->request_buffer, cmd->request_bufflen);
+	       scsi_sglist(cmd), scsi_bufflen(cmd));
 	/* if (cmd->use_sg)
 	   {
 	   sg = (struct scatterlist *) cmd->request_buffer;
......
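The qla1280 hunks above drop the old use_sg/request_buffer/request_bufflen handling in favour of the data buffer accessors: scsi_dma_map() maps the command's scatterlist (returning 0 when there is no data and a negative value on failure), the mapped entries are walked with the sg iterators, residuals go through scsi_set_resid(), and completion uses scsi_dma_unmap() instead of pci_unmap_sg()/pci_unmap_single(). A minimal sketch of the pattern (hypothetical functions, not qla1280's code):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int example_map_and_build(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int seg_cnt, i;

	seg_cnt = scsi_dma_map(cmd);	/* 0: no data, < 0: mapping failed */
	if (seg_cnt < 0)
		return -EIO;

	scsi_for_each_sg(cmd, sg, seg_cnt, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* hand addr/len to the adapter's request entry here */
		(void)addr;
		(void)len;
	}
	return 0;
}

static void example_complete(struct scsi_cmnd *cmd)
{
	scsi_dma_unmap(cmd);
	scsi_set_resid(cmd, 0);
}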
@@ -268,7 +268,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt)
 	((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;

 	if (request_irq(instance->irq, scsi_sun3_intr,
-			0, "Sun3SCSI-5380", NULL)) {
+			0, "Sun3SCSI-5380", instance)) {
 #ifndef REAL_DMA
 		printk("scsi%d: IRQ%d not free, interrupts disabled\n",
 		       instance->host_no, instance->irq);
@@ -310,7 +310,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt)
 int sun3scsi_release (struct Scsi_Host *shpnt)
 {
 	if (shpnt->irq != SCSI_IRQ_NONE)
-		free_irq (shpnt->irq, NULL);
+		free_irq(shpnt->irq, shpnt);

 	iounmap((void *)sun3_scsi_regp);
......
@@ -230,7 +230,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt)
 	((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;

 	if (request_irq(instance->irq, scsi_sun3_intr,
-			0, "Sun3SCSI-5380VME", NULL)) {
+			0, "Sun3SCSI-5380VME", instance)) {
 #ifndef REAL_DMA
 		printk("scsi%d: IRQ%d not free, interrupts disabled\n",
 		       instance->host_no, instance->irq);
@@ -279,7 +279,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt)
 int sun3scsi_release (struct Scsi_Host *shpnt)
 {
 	if (shpnt->irq != SCSI_IRQ_NONE)
-		free_irq (shpnt->irq, NULL);
+		free_irq(shpnt->irq, shpnt);

 	iounmap((void *)sun3_scsi_regp);
......
@@ -259,7 +259,8 @@ int __init t128_detect(struct scsi_host_template * tpnt){
 		instance->irq = NCR5380_probe_irq(instance, T128_IRQS);

 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) {
+		if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
+				instance)) {
 			printk("scsi%d : IRQ%d not free, interrupts disabled\n",
 			       instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
@@ -295,7 +296,7 @@ static int t128_release(struct Scsi_Host *shost)
 	NCR5380_local_declare();
 	NCR5380_setup(shost);
 	if (shost->irq)
-		free_irq(shost->irq, NULL);
+		free_irq(shost->irq, shost);
 	NCR5380_exit(shost);
 	if (shost->io_port && shost->n_io_port)
 		release_region(shost->io_port, shost->n_io_port);
......