Commit cedc9a47 authored by Jeff Garzik's avatar Jeff Garzik

libata: fix ATAPI DMA alignment issues

ATAPI DMA transfers need to be padded to the next 4-byte boundary, if misaligned.

Original work by me, many fixes from Tejun Heo.
parent ed39f731
...@@ -314,8 +314,15 @@ static int ahci_port_start(struct ata_port *ap) ...@@ -314,8 +314,15 @@ static int ahci_port_start(struct ata_port *ap)
return -ENOMEM; return -ENOMEM;
memset(pp, 0, sizeof(*pp)); memset(pp, 0, sizeof(*pp));
ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
if (!ap->pad) {
kfree(pp);
return -ENOMEM;
}
mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
if (!mem) { if (!mem) {
dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
kfree(pp); kfree(pp);
return -ENOMEM; return -ENOMEM;
} }
...@@ -391,6 +398,7 @@ static void ahci_port_stop(struct ata_port *ap) ...@@ -391,6 +398,7 @@ static void ahci_port_stop(struct ata_port *ap)
ap->private_data = NULL; ap->private_data = NULL;
dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
pp->cmd_slot, pp->cmd_slot_dma); pp->cmd_slot, pp->cmd_slot_dma);
dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
kfree(pp); kfree(pp);
} }
...@@ -476,23 +484,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) ...@@ -476,23 +484,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void ahci_fill_sg(struct ata_queued_cmd *qc) static void ahci_fill_sg(struct ata_queued_cmd *qc)
{ {
struct ahci_port_priv *pp = qc->ap->private_data; struct ahci_port_priv *pp = qc->ap->private_data;
unsigned int i; struct scatterlist *sg;
struct ahci_sg *ahci_sg;
VPRINTK("ENTER\n"); VPRINTK("ENTER\n");
/* /*
* Next, the S/G list. * Next, the S/G list.
*/ */
for (i = 0; i < qc->n_elem; i++) { ahci_sg = pp->cmd_tbl_sg;
u32 sg_len; ata_for_each_sg(sg, qc) {
dma_addr_t addr; dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
addr = sg_dma_address(&qc->sg[i]);
sg_len = sg_dma_len(&qc->sg[i]); ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff); ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); ahci_sg++;
pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
} }
} }
......
...@@ -2156,8 +2156,9 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) ...@@ -2156,8 +2156,9 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
static void ata_sg_clean(struct ata_queued_cmd *qc) static void ata_sg_clean(struct ata_queued_cmd *qc)
{ {
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->__sg;
int dir = qc->dma_dir; int dir = qc->dma_dir;
void *pad_buf = NULL;
assert(qc->flags & ATA_QCFLAG_DMAMAP); assert(qc->flags & ATA_QCFLAG_DMAMAP);
assert(sg != NULL); assert(sg != NULL);
...@@ -2167,14 +2168,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) ...@@ -2167,14 +2168,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
DPRINTK("unmapping %u sg elements\n", qc->n_elem); DPRINTK("unmapping %u sg elements\n", qc->n_elem);
if (qc->flags & ATA_QCFLAG_SG) /* if we padded the buffer out to 32-bit bound, and data
* xfer direction is from-device, we must copy from the
* pad buffer back into the supplied buffer
*/
if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
if (qc->flags & ATA_QCFLAG_SG) {
dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
else /* restore last sg */
sg[qc->orig_n_elem - 1].length += qc->pad_len;
if (pad_buf) {
struct scatterlist *psg = &qc->pad_sgent;
void *addr = kmap_atomic(psg->page, KM_IRQ0);
memcpy(addr + psg->offset, pad_buf, qc->pad_len);
kunmap_atomic(psg->page, KM_IRQ0);
}
} else {
dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
sg_dma_len(&sg[0]), dir); sg_dma_len(&sg[0]), dir);
/* restore sg */
sg->length += qc->pad_len;
if (pad_buf)
memcpy(qc->buf_virt + sg->length - qc->pad_len,
pad_buf, qc->pad_len);
}
qc->flags &= ~ATA_QCFLAG_DMAMAP; qc->flags &= ~ATA_QCFLAG_DMAMAP;
qc->sg = NULL; qc->__sg = NULL;
} }
/** /**
...@@ -2190,15 +2212,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) ...@@ -2190,15 +2212,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
*/ */
static void ata_fill_sg(struct ata_queued_cmd *qc) static void ata_fill_sg(struct ata_queued_cmd *qc)
{ {
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
unsigned int idx, nelem; struct scatterlist *sg;
unsigned int idx;
assert(sg != NULL); assert(qc->__sg != NULL);
assert(qc->n_elem > 0); assert(qc->n_elem > 0);
idx = 0; idx = 0;
for (nelem = qc->n_elem; nelem; nelem--,sg++) { ata_for_each_sg(sg, qc) {
u32 addr, offset; u32 addr, offset;
u32 sg_len, len; u32 sg_len, len;
...@@ -2289,11 +2311,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) ...@@ -2289,11 +2311,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
qc->flags |= ATA_QCFLAG_SINGLE; qc->flags |= ATA_QCFLAG_SINGLE;
memset(&qc->sgent, 0, sizeof(qc->sgent)); memset(&qc->sgent, 0, sizeof(qc->sgent));
qc->sg = &qc->sgent; qc->__sg = &qc->sgent;
qc->n_elem = 1; qc->n_elem = 1;
qc->orig_n_elem = 1;
qc->buf_virt = buf; qc->buf_virt = buf;
sg = qc->sg; sg = qc->__sg;
sg->page = virt_to_page(buf); sg->page = virt_to_page(buf);
sg->offset = (unsigned long) buf & ~PAGE_MASK; sg->offset = (unsigned long) buf & ~PAGE_MASK;
sg->length = buflen; sg->length = buflen;
...@@ -2317,8 +2340,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, ...@@ -2317,8 +2340,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem) unsigned int n_elem)
{ {
qc->flags |= ATA_QCFLAG_SG; qc->flags |= ATA_QCFLAG_SG;
qc->sg = sg; qc->__sg = sg;
qc->n_elem = n_elem; qc->n_elem = n_elem;
qc->orig_n_elem = n_elem;
} }
/** /**
...@@ -2338,9 +2362,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) ...@@ -2338,9 +2362,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{ {
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
int dir = qc->dma_dir; int dir = qc->dma_dir;
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->__sg;
dma_addr_t dma_address; dma_addr_t dma_address;
/* we must lengthen transfers to end on a 32-bit boundary */
qc->pad_len = sg->length & 3;
if (qc->pad_len) {
void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
struct scatterlist *psg = &qc->pad_sgent;
assert(qc->dev->class == ATA_DEV_ATAPI);
memset(pad_buf, 0, ATA_DMA_PAD_SZ);
if (qc->tf.flags & ATA_TFLAG_WRITE)
memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
qc->pad_len);
sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim sg */
sg->length -= qc->pad_len;
DPRINTK("padding done, sg->length=%u pad_len=%u\n",
sg->length, qc->pad_len);
}
dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
sg->length, dir); sg->length, dir);
if (dma_mapping_error(dma_address)) if (dma_mapping_error(dma_address))
...@@ -2372,12 +2419,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) ...@@ -2372,12 +2419,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
static int ata_sg_setup(struct ata_queued_cmd *qc) static int ata_sg_setup(struct ata_queued_cmd *qc)
{ {
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->__sg;
struct scatterlist *lsg = &sg[qc->n_elem - 1];
int n_elem, dir; int n_elem, dir;
VPRINTK("ENTER, ata%u\n", ap->id); VPRINTK("ENTER, ata%u\n", ap->id);
assert(qc->flags & ATA_QCFLAG_SG); assert(qc->flags & ATA_QCFLAG_SG);
/* we must lengthen transfers to end on a 32-bit boundary */
qc->pad_len = lsg->length & 3;
if (qc->pad_len) {
void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
struct scatterlist *psg = &qc->pad_sgent;
unsigned int offset;
assert(qc->dev->class == ATA_DEV_ATAPI);
memset(pad_buf, 0, ATA_DMA_PAD_SZ);
/*
* psg->page/offset are used to copy to-be-written
* data in this function or read data in ata_sg_clean.
*/
offset = lsg->offset + lsg->length - qc->pad_len;
psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
psg->offset = offset_in_page(offset);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
void *addr = kmap_atomic(psg->page, KM_IRQ0);
memcpy(pad_buf, addr + psg->offset, qc->pad_len);
kunmap_atomic(psg->page, KM_IRQ0);
}
sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim last sg */
lsg->length -= qc->pad_len;
DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
qc->n_elem - 1, lsg->length, qc->pad_len);
}
dir = qc->dma_dir; dir = qc->dma_dir;
n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
if (n_elem < 1) if (n_elem < 1)
...@@ -2655,7 +2737,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, ...@@ -2655,7 +2737,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
static void ata_pio_sector(struct ata_queued_cmd *qc) static void ata_pio_sector(struct ata_queued_cmd *qc)
{ {
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->__sg;
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct page *page; struct page *page;
unsigned int offset; unsigned int offset;
...@@ -2705,7 +2787,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) ...@@ -2705,7 +2787,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{ {
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct scatterlist *sg = qc->sg; struct scatterlist *sg = qc->__sg;
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct page *page; struct page *page;
unsigned char *buf; unsigned char *buf;
...@@ -2738,7 +2820,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) ...@@ -2738,7 +2820,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
return; return;
} }
sg = &qc->sg[qc->cursg]; sg = &qc->__sg[qc->cursg];
page = sg->page; page = sg->page;
offset = sg->offset + qc->cursg_ofs; offset = sg->offset + qc->cursg_ofs;
...@@ -3145,7 +3227,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, ...@@ -3145,7 +3227,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
qc = ata_qc_new(ap); qc = ata_qc_new(ap);
if (qc) { if (qc) {
qc->sg = NULL; qc->__sg = NULL;
qc->flags = 0; qc->flags = 0;
qc->scsicmd = NULL; qc->scsicmd = NULL;
qc->ap = ap; qc->ap = ap;
...@@ -3837,6 +3919,12 @@ int ata_port_start (struct ata_port *ap) ...@@ -3837,6 +3919,12 @@ int ata_port_start (struct ata_port *ap)
if (!ap->prd) if (!ap->prd)
return -ENOMEM; return -ENOMEM;
ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
if (!ap->pad) {
dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
return -ENOMEM;
}
DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
return 0; return 0;
...@@ -3859,6 +3947,7 @@ void ata_port_stop (struct ata_port *ap) ...@@ -3859,6 +3947,7 @@ void ata_port_stop (struct ata_port *ap)
struct device *dev = ap->host_set->dev; struct device *dev = ap->host_set->dev;
dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
} }
void ata_host_stop (struct ata_host_set *host_set) void ata_host_stop (struct ata_host_set *host_set)
......
...@@ -150,10 +150,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, ...@@ -150,10 +150,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
qc->scsidone = done; qc->scsidone = done;
if (cmd->use_sg) { if (cmd->use_sg) {
qc->sg = (struct scatterlist *) cmd->request_buffer; qc->__sg = (struct scatterlist *) cmd->request_buffer;
qc->n_elem = cmd->use_sg; qc->n_elem = cmd->use_sg;
} else { } else {
qc->sg = &qc->sgent; qc->__sg = &qc->sgent;
qc->n_elem = 1; qc->n_elem = 1;
} }
} else { } else {
...@@ -364,6 +364,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev) ...@@ -364,6 +364,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
*/ */
blk_queue_max_sectors(sdev->request_queue, 2048); blk_queue_max_sectors(sdev->request_queue, 2048);
} }
/*
* SATA DMA transfers must be multiples of 4 byte, so
* we need to pad ATAPI transfers using an extra sg.
* Decrement max hw segments accordingly.
*/
if (dev->class == ATA_DEV_ATAPI) {
request_queue_t *q = sdev->request_queue;
blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
}
} }
return 0; /* scsi layer doesn't check return value, sigh */ return 0; /* scsi layer doesn't check return value, sigh */
......
...@@ -268,16 +268,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) ...@@ -268,16 +268,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
static void qs_fill_sg(struct ata_queued_cmd *qc) static void qs_fill_sg(struct ata_queued_cmd *qc)
{ {
struct scatterlist *sg = qc->sg; struct scatterlist *sg;
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct qs_port_priv *pp = ap->private_data; struct qs_port_priv *pp = ap->private_data;
unsigned int nelem; unsigned int nelem;
u8 *prd = pp->pkt + QS_CPB_BYTES; u8 *prd = pp->pkt + QS_CPB_BYTES;
assert(sg != NULL); assert(qc->__sg != NULL);
assert(qc->n_elem > 0); assert(qc->n_elem > 0);
for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) { nelem = 0;
ata_for_each_sg(sg, qc) {
u64 addr; u64 addr;
u32 len; u32 len;
...@@ -291,6 +292,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc) ...@@ -291,6 +292,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc)
VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem, VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
(unsigned long long)addr, len); (unsigned long long)addr, len);
nelem++;
} }
} }
......
...@@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, ...@@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
static void pdc20621_dma_prep(struct ata_queued_cmd *qc) static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{ {
struct scatterlist *sg = qc->sg; struct scatterlist *sg;
struct ata_port *ap = qc->ap; struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data; struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base; void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data; struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio; void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no; unsigned int portno = ap->port_no;
unsigned int i, last, idx, total_len = 0, sgt_len; unsigned int i, idx, total_len = 0, sgt_len;
u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
assert(qc->flags & ATA_QCFLAG_DMAMAP); assert(qc->flags & ATA_QCFLAG_DMAMAP);
...@@ -469,12 +469,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) ...@@ -469,12 +469,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
/* /*
* Build S/G table * Build S/G table
*/ */
last = qc->n_elem;
idx = 0; idx = 0;
for (i = 0; i < last; i++) { ata_for_each_sg(sg, qc) {
buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i])); buf[idx++] = cpu_to_le32(sg_dma_address(sg));
buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i])); buf[idx++] = cpu_to_le32(sg_dma_len(sg));
total_len += sg_dma_len(&sg[i]); total_len += sg_dma_len(sg);
} }
buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
sgt_len = idx * 4; sgt_len = idx * 4;
......
...@@ -154,6 +154,10 @@ enum { ...@@ -154,6 +154,10 @@ enum {
ATA_SHIFT_UDMA = 0, ATA_SHIFT_UDMA = 0,
ATA_SHIFT_MWDMA = 8, ATA_SHIFT_MWDMA = 8,
ATA_SHIFT_PIO = 11, ATA_SHIFT_PIO = 11,
/* size of buffer to pad xfers ending on unaligned boundaries */
ATA_DMA_PAD_SZ = 4,
ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
}; };
enum pio_task_states { enum pio_task_states {
...@@ -237,9 +241,12 @@ struct ata_queued_cmd { ...@@ -237,9 +241,12 @@ struct ata_queued_cmd {
unsigned long flags; /* ATA_QCFLAG_xxx */ unsigned long flags; /* ATA_QCFLAG_xxx */
unsigned int tag; unsigned int tag;
unsigned int n_elem; unsigned int n_elem;
unsigned int orig_n_elem;
int dma_dir; int dma_dir;
unsigned int pad_len;
unsigned int nsect; unsigned int nsect;
unsigned int cursect; unsigned int cursect;
...@@ -250,9 +257,11 @@ struct ata_queued_cmd { ...@@ -250,9 +257,11 @@ struct ata_queued_cmd {
unsigned int cursg_ofs; unsigned int cursg_ofs;
struct scatterlist sgent; struct scatterlist sgent;
struct scatterlist pad_sgent;
void *buf_virt; void *buf_virt;
struct scatterlist *sg; /* DO NOT iterate over __sg manually, use ata_for_each_sg() */
struct scatterlist *__sg;
ata_qc_cb_t complete_fn; ata_qc_cb_t complete_fn;
...@@ -295,6 +304,9 @@ struct ata_port { ...@@ -295,6 +304,9 @@ struct ata_port {
struct ata_prd *prd; /* our SG list */ struct ata_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */ dma_addr_t prd_dma; /* and its DMA mapping */
void *pad; /* array of DMA pad buffers */
dma_addr_t pad_dma;
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */ u8 ctl; /* cache of ATA control register */
...@@ -458,6 +470,19 @@ extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); ...@@ -458,6 +470,19 @@ extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
#endif /* CONFIG_PCI */ #endif /* CONFIG_PCI */
static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
if (sg == &qc->pad_sgent)
return NULL;
if (++sg - qc->__sg < qc->n_elem)
return sg;
return qc->pad_len ? &qc->pad_sgent : NULL;
}
#define ata_for_each_sg(sg, qc) \
for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))
static inline unsigned int ata_tag_valid(unsigned int tag) static inline unsigned int ata_tag_valid(unsigned int tag)
{ {
return (tag < ATA_MAX_QUEUE) ? 1 : 0; return (tag < ATA_MAX_QUEUE) ? 1 : 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment