Commit 6f2dc3d3 authored by Linus Torvalds

Merge tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping helpers from Christoph Hellwig:
 "These were in a separate stable branch so that various media and drm
  trees could pull them in for bug fixes, but looking at linux-next that
  hasn't actually happened yet. Still sending the APIs to you in the
  hope that these bug fixes get picked up for 5.8 in one way or another.

  Summary:

   - add DMA mapping helpers for struct sg_table (Marek Szyprowski)"

* tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping:
  iommu: add generic helper for mapping sgtable objects
  scatterlist: add generic wrappers for iterating over sgtable objects
  dma-mapping: add generic helpers for mapping sgtable objects
parents 1ee18de9 48530d9f

@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL on error during mapping the buffer.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}
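
A minimal usage sketch, not part of the patch ("dev" and "sgt" stand in for a
driver's device and a previously allocated and populated sg_table):

	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* nothing was mapped, nothing to undo */
	/* ... hand the mapped entries off to the hardware ... */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
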
/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
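
Together the four helpers form the usual ownership lifecycle. A hedged sketch
(again with placeholder "dev"/"sgt", and a buffer mapped with DMA_FROM_DEVICE)
letting the CPU inspect received data between two transfers:

	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
	/* the CPU may now safely read the received data */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
	/* ownership is back with the device; start the next transfer */
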
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)

@@ -466,6 +466,22 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
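
A usage sketch, illustrative only ("domain", "iova" and "sgt" are assumed to
exist in the caller). In this kernel version iommu_map_sg() returns the number
of bytes mapped, or 0 on failure, and the wrapper inherits that convention:

	size_t mapped;

	mapped = iommu_map_sgtable(domain, iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	if (!mapped)
		return -ENOMEM;	/* nothing was mapped */
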
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,

@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

/*
 * Loop over each sg element in the given sg_table object.
 */
#define for_each_sgtable_sg(sgt, sg, i)		\
	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)

/*
 * Loop over each sg element in the given *DMA mapped* sg_table object.
 * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract the DMA
 * address and length of each element.
 */
#define for_each_sgtable_dma_sg(sgt, sg, i)	\
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
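
The two wrappers differ only in their iteration bound. A sketch (placeholder
"sgt") walking the CPU-side entries and then the DMA-mapped ones, which may be
fewer if the IOMMU coalesced entries during mapping:

	struct scatterlist *sg;
	int i;

	/* CPU view: sgt->orig_nents entries, page/length are valid */
	for_each_sgtable_sg(sgt, sg, i)
		pr_debug("page %p, len %u\n", sg_page(sg), sg->length);

	/* DMA view: sgt->nents entries, DMA address/length are valid */
	for_each_sgtable_dma_sg(sgt, sg, i)
		pr_debug("dma %pad, len %u\n", &sg_dma_address(sg),
			 sg_dma_len(sg));
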
/**
 * sg_chain - Chain two sglists together
 * @prv: First scatterlist

@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
 * @sglist: sglist to iterate over
 * @piter: page iterator to hold current page, sg, sg_pgoffset
 * @nents: maximum number of sg entries to iterate over
 * @pgoffset: starting page offset (in pages)
 *
 * Callers may use sg_page_iter_page() to get each page pointer.
 * Each iteration operates on a single PAGE_SIZE unit.
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)	\
	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \

@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
 * for_each_sg_dma_page - iterate over the pages of the given sg list
 * @sglist: sglist to iterate over
 * @dma_iter: DMA page iterator to hold current page
 * @dma_nents: maximum number of sg entries to iterate over, this is the
 *	value returned from dma_map_sg
 * @pgoffset: starting page offset (in pages)
 *
 * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
 * Each iteration operates on a single PAGE_SIZE unit.
 */
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset)	\
	for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents,	\
				  pgoffset);				\
	     __sg_page_iter_dma_next(dma_iter);)

/**
 * for_each_sgtable_page - iterate over all pages in the sg_table object
 * @sgt: sg_table object to iterate over
 * @piter: page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 *
 * Iterates over all memory pages in the buffer described by a scatterlist
 * stored in the given sg_table object.
 * See also for_each_sg_page(). Each iteration operates on a single
 * PAGE_SIZE unit.
 */
#define for_each_sgtable_page(sgt, piter, pgoffset)	\
	for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)

/**
 * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
 * @sgt: sg_table object to iterate over
 * @dma_iter: DMA page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 *
 * Iterates over all DMA-mapped pages in the buffer described by a
 * scatterlist stored in the given sg_table object.
 * See also for_each_sg_dma_page(). Each iteration operates on a single
 * PAGE_SIZE unit.
 */
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)	\
	for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
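
A sketch of the page-granular variants (placeholder "sgt"); each step yields
one PAGE_SIZE chunk regardless of how large the individual sg entries are:

	struct sg_page_iter piter;
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_page(sgt, &piter, 0) {
		struct page *page = sg_page_iter_page(&piter);
		/* ... operate on this CPU page ... */
	}

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
		/* ... program the device one page at a time ... */
	}
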
/*
 * Mapping sg iterator
 *