Commit 7a86d469 authored by Paul Cercueil, committed by Jonathan Cameron

iio: buffer-dmaengine: Support new DMABUF based userspace API

Use the functions provided by the buffer-dma core to implement the
DMABUF userspace API in the buffer-dmaengine IIO buffer implementation.

Since we want to be able to transfer an arbitrary number of bytes and
not necessarily the full DMABUF, the associated scatterlist is converted
to an array of DMA addresses + lengths, which is then passed to
dmaengine_prep_peripheral_dma_vec().
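
Expressed as a standalone sketch, the conversion looks like this (the
sg_to_dma_vecs() helper is illustrative only; in the patch the loop is
inlined in iio_dmaengine_buffer_submit_block(), see below):

    #include <linux/dmaengine.h>    /* struct dma_vec */
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Illustrative only: build a dma_vec array covering the first
     * "bytes_used" bytes of an already DMA-mapped scatterlist.
     * Returns the number of entries, or a negative error code.
     */
    static int sg_to_dma_vecs(struct scatterlist *sgl, size_t bytes_used,
                              struct dma_vec **out)
    {
            size_t len_total = bytes_used;
            struct dma_vec *vecs;
            unsigned int i;
            int nents;

            nents = sg_nents_for_len(sgl, bytes_used);
            if (nents < 0)
                    return nents;

            /* Non-blocking allocation, matching the driver code below. */
            vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
            if (!vecs)
                    return -ENOMEM;

            for (i = 0; i < nents; i++) {
                    vecs[i].addr = sg_dma_address(sgl);
                    /* The last segment may be partial: clamp it so that
                     * exactly bytes_used bytes end up being transferred.
                     */
                    vecs[i].len = min(sg_dma_len(sgl), len_total);
                    len_total -= vecs[i].len;
                    sgl = sg_next(sgl);
            }

            *out = vecs;
            return nents;
    }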
Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Co-developed-by: Nuno Sa <nuno.sa@analog.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://patch.msgid.link/20240620122726.41232-6-paul@crapouillou.net
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent d8531890
@@ -65,25 +65,62 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
                 iio_buffer_to_dmaengine_buffer(&queue->buffer);
         struct dma_async_tx_descriptor *desc;
         enum dma_transfer_direction dma_dir;
+        struct scatterlist *sgl;
+        struct dma_vec *vecs;
         size_t max_size;
         dma_cookie_t cookie;
+        size_t len_total;
+        unsigned int i;
+        int nents;
 
-        max_size = min(block->size, dmaengine_buffer->max_size);
-        max_size = round_down(max_size, dmaengine_buffer->align);
-
-        if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
-                block->bytes_used = max_size;
+        if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
                 dma_dir = DMA_DEV_TO_MEM;
-        } else {
+        else
                 dma_dir = DMA_MEM_TO_DEV;
-        }
 
-        if (!block->bytes_used || block->bytes_used > max_size)
-                return -EINVAL;
+        if (block->sg_table) {
+                sgl = block->sg_table->sgl;
+                nents = sg_nents_for_len(sgl, block->bytes_used);
+                if (nents < 0)
+                        return nents;
+
+                vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
+                if (!vecs)
+                        return -ENOMEM;
+
+                len_total = block->bytes_used;
+
+                for (i = 0; i < nents; i++) {
+                        vecs[i].addr = sg_dma_address(sgl);
+                        vecs[i].len = min(sg_dma_len(sgl), len_total);
+                        len_total -= vecs[i].len;
+
+                        sgl = sg_next(sgl);
+                }
 
-        desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-                block->phys_addr, block->bytes_used, dma_dir,
-                DMA_PREP_INTERRUPT);
+                desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
+                                                         vecs, nents, dma_dir,
+                                                         DMA_PREP_INTERRUPT);
+                kfree(vecs);
+        } else {
+                max_size = min(block->size, dmaengine_buffer->max_size);
+                max_size = round_down(max_size, dmaengine_buffer->align);
+
+                if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+                        block->bytes_used = max_size;
+
+                if (!block->bytes_used || block->bytes_used > max_size)
+                        return -EINVAL;
+
+                desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+                                                   block->phys_addr,
+                                                   block->bytes_used,
+                                                   dma_dir,
+                                                   DMA_PREP_INTERRUPT);
+        }
 
         if (!desc)
                 return -ENOMEM;
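
For reference, the vector-based prep call used above is provided by the
dmaengine core, added earlier in this series; the sketch below shows the
shape of that API as assumed here, not a verbatim copy of the dmaengine
headers:

    /* One DMA address + length pair per transfer segment. */
    struct dma_vec {
            dma_addr_t addr;
            size_t len;
    };

    /* Prepare a peripheral transfer described by an array of dma_vec
     * entries. Returns NULL if the channel's driver does not implement
     * the operation, which the caller above maps to -ENOMEM.
     */
    static inline struct dma_async_tx_descriptor *
    dmaengine_prep_peripheral_dma_vec(struct dma_chan *chan,
                                      const struct dma_vec *vecs,
                                      size_t nents,
                                      enum dma_transfer_direction dir,
                                      unsigned long flags)
    {
            if (!chan || !chan->device ||
                !chan->device->device_prep_peripheral_dma_vec)
                    return NULL;

            return chan->device->device_prep_peripheral_dma_vec(chan, vecs,
                                                                nents, dir,
                                                                flags);
    }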
@@ -133,6 +170,13 @@ static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
         .space_available = iio_dma_buffer_usage,
         .release = iio_dmaengine_buffer_release,
 
+        .enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+        .attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+        .detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
+        .lock_queue = iio_dma_buffer_lock_queue,
+        .unlock_queue = iio_dma_buffer_unlock_queue,
+
         .modes = INDIO_BUFFER_HARDWARE,
         .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 };
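
With these hooks wired up, userspace can attach a DMABUF to an IIO
buffer and enqueue partial transfers through the new ioctls. A
hypothetical usage sketch follows; the ioctl names and the struct
iio_dmabuf layout are assumed from the uapi patch earlier in this
series, and the fd handling is illustrative only:

    #include <sys/ioctl.h>
    #include <linux/iio/buffer.h>   /* struct iio_dmabuf + DMABUF ioctls */

    /* Sketch: attach an externally created DMABUF to an IIO buffer fd
     * (itself obtained via IIO_BUFFER_GET_FD_IOCTL) and queue the first
     * 4096 bytes of it for transfer. Error handling is elided;
     * "buffer_fd" and "dmabuf_fd" are assumed to already exist.
     */
    static int enqueue_partial(int buffer_fd, int dmabuf_fd)
    {
            struct iio_dmabuf req = {
                    .fd = dmabuf_fd,
                    .flags = 0,             /* no IIO_BUFFER_DMABUF_CYCLIC */
                    .bytes_used = 4096,     /* less than the full DMABUF */
            };

            if (ioctl(buffer_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd) < 0)
                    return -1;

            /* The driver converts the attached scatterlist into dma_vec
             * entries covering only bytes_used bytes (see the first hunk).
             */
            return ioctl(buffer_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);
    }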