Commit 63015bc9 authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] ipr: convert to use the data buffer accessors

- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the request parameters (buffer length, residual count).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent c13e5566
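
For readers new to the accessors, here is a minimal, illustrative sketch (not part of the commit) of the mapping-side pattern the diff below adopts in ipr_build_ioadl(). The names my_build_sglist() and my_fill_entry() are hypothetical stand-ins for a driver's own routines; the real API being adopted is scsi_bufflen(), scsi_dma_map(), scsi_for_each_sg(), sg_dma_address() and sg_dma_len().

/*
 * Illustrative sketch only -- not part of the commit.  A hypothetical
 * low-level driver builds its hardware S/G descriptors with the data
 * buffer accessors; ipr's equivalent is ipr_build_ioadl() below.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical descriptor writer -- stands in for ipr's ioadl setup. */
static void my_fill_entry(int idx, dma_addr_t addr, unsigned int len)
{
	/* a real driver would write addr/len into its DMA descriptor here */
}

static int my_build_sglist(struct scsi_cmnd *scsi_cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_bufflen(scsi_cmd))		/* no data to transfer */
		return 0;

	nseg = scsi_dma_map(scsi_cmd);		/* map the command's sg list */
	if (nseg < 0)
		return -EIO;			/* mapping failed */

	scsi_for_each_sg(scsi_cmd, sg, nseg, i)	/* walk the mapped entries */
		my_fill_entry(i, sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}

The point of the conversion is that scsi_dma_map() folds the old use_sg and single-buffer cases into one path: it returns the number of mapped segments, 0 when there is no data, and a negative value on failure, which is why the separate map_single branch in ipr_build_ioadl() can simply be deleted.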
@@ -539,32 +539,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 	return ipr_cmd;
 }
 
-/**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg:	ioa config struct
- * @ipr_cmd:	ipr command struct
- *
- * Return value:
- *	nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
-			     struct ipr_cmnd *ipr_cmd)
-{
-	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
-	if (ipr_cmd->dma_use_sg) {
-		if (scsi_cmd->use_sg > 0) {
-			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
-				     scsi_cmd->use_sg,
-				     scsi_cmd->sc_data_direction);
-		} else {
-			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
-					 scsi_cmd->request_bufflen,
-					 scsi_cmd->sc_data_direction);
-		}
-	}
-}
-
 /**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 	scsi_cmd->result |= (DID_ERROR << 16);
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 }
@@ -4285,93 +4259,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
 			   struct ipr_cmnd *ipr_cmd)
 {
-	int i;
-	struct scatterlist *sglist;
+	int i, nseg;
+	struct scatterlist *sg;
 	u32 length;
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
-	length = scsi_cmd->request_bufflen;
-
-	if (length == 0)
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
 		return 0;
 
-	if (scsi_cmd->use_sg) {
-		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
-						 scsi_cmd->request_buffer,
-						 scsi_cmd->use_sg,
-						 scsi_cmd->sc_data_direction);
-
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		}
-
-		sglist = scsi_cmd->request_buffer;
-
-		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		}
-
-		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
-			ioadl[i].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
-			ioadl[i].address =
-				cpu_to_be32(sg_dma_address(&sglist[i]));
-		}
-
-		if (likely(ipr_cmd->dma_use_sg)) {
-			ioadl[i-1].flags_and_data_len |=
-				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
-	} else {
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		}
-
-		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
-						     scsi_cmd->request_buffer, length,
-						     scsi_cmd->sc_data_direction);
-
-		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-			ipr_cmd->dma_use_sg = 1;
-			ioadl[0].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
-			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
 	}
 
-	return -1;
+	ipr_cmd->dma_use_sg = nseg;
+
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(length);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(length);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
+
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+		ioadl = ioarcb->add_data.u.ioadl;
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+				    offsetof(struct ipr_ioarcb, add_data));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
+
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl[i].flags_and_data_len =
+			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
+	}
+
+	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
 }
 
 /**
@@ -4434,7 +4370,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 		res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4812,7 +4748,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	}
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4833,10 +4769,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
-	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+		scsi_dma_unmap(ipr_cmd->scsi_cmd);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
 	} else
...
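
The completion-path hunks above (ipr_scsi_eh_done, ipr_erp_done, ipr_erp_start and ipr_scsi_done) all make the same substitution. A minimal sketch, again with a hypothetical my_command_done() and a caller-supplied residual count standing in for ipr's ioasa.residual_data_len:

/*
 * Illustrative sketch only -- not part of the commit.  Completion path
 * of a hypothetical low-level driver using the accessors.
 */
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static void my_command_done(struct scsi_cmnd *scsi_cmd, u32 residual)
{
	scsi_set_resid(scsi_cmd, residual);	/* report untransferred bytes */
	scsi_dma_unmap(scsi_cmd);		/* replaces the open-coded pci_unmap_sg/pci_unmap_single */
	scsi_cmd->scsi_done(scsi_cmd);		/* hand the command back to the midlayer */
}

Because scsi_dma_unmap() works from the scsi_cmnd itself, the unmap side needs no driver-private state, which is why ipr_unmap_sglist() could be removed outright.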