Commit 1bfde037 authored by Linus Torvalds

Merge tag 'dma-mapping-5.8-5' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - add a warning when the atomic pool is depleted (David Rientjes)

 - protect the parameters of the new scatterlist helper macros (Marek
   Szyprowski)

* tag 'dma-mapping-5.8-5' of git://git.infradead.org/users/hch/dma-mapping:
  scatterlist: protect parameters of the sg_table related macros
  dma-mapping: warn when coherent pool is depleted
parents 25aadbd2 68d23705
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -155,7 +155,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * Loop over each sg element in the given sg_table object.
  */
 #define for_each_sgtable_sg(sgt, sg, i)		\
-	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
 
 /*
  * Loop over each sg element in the given *DMA mapped* sg_table object.
@@ -163,7 +163,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * of the each element.
  */
 #define for_each_sgtable_dma_sg(sgt, sg, i)	\
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
 
 /**
  * sg_chain - Chain two sglists together
@@ -451,7 +451,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
  */
 #define for_each_sgtable_page(sgt, piter, pgoffset)	\
-	for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+	for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
 
 /**
  * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
@@ -465,7 +465,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * unit.
  */
 #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)	\
-	for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+	for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
 
 /*
...
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -239,12 +239,16 @@ void *dma_alloc_from_pool(struct device *dev, size_t size,
 	}
 
 	val = gen_pool_alloc(pool, size);
-	if (val) {
+	if (likely(val)) {
 		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
 		*ret_page = pfn_to_page(__phys_to_pfn(phys));
 		ptr = (void *)val;
 		memset(ptr, 0, size);
+	} else {
+		WARN_ONCE(1, "DMA coherent pool depleted, increase size "
+			       "(recommended min coherent_pool=%zuK)\n",
+			       gen_pool_size(pool) >> 9);
 	}
 	if (gen_pool_avail(pool) < atomic_pool_size)
 		schedule_work(&atomic_pool_work);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment