Commit 7e7b6965 authored by Linus Torvalds

Merge tag 'dma-mapping-5.17' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - refactor the dma-direct coherent allocator (a sketch of the new helpers follows the commit list below)

 - turn a macro into an inline in scatterlist.h (Logan Gunthorpe)

* tag 'dma-mapping-5.17' of git://git.infradead.org/users/hch/dma-mapping:
  lib/scatterlist: cleanup macros into static inline functions
  dma-direct: add a dma_direct_use_pool helper
  dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages
  dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL conditionals
  dma-direct: warn if there is no pool for force unencrypted allocations
  dma-direct: fail allocations that can't be made coherent
  dma-direct: refactor the !coherent checks in dma_direct_alloc
  dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
  dma-direct: clean up the remapping checks in dma_direct_alloc
  dma-direct: always leak memory that can't be re-encrypted
  dma-direct: don't call dma_set_decrypted for remapped allocations
  dma-direct: factor out dma_set_{de,en}crypted helpers
parents daadb3bd f857acfc
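To make the shape of the dma-direct refactor above concrete, here is a sketch of the new helpers named in the commit titles. The names are confirmed by the titles; the bodies are a plausible reconstruction, not a verbatim quote of kernel/dma/direct.c:

/* Sketch only: signatures follow the commit titles above; bodies are
 * reconstructed and may differ from the actual kernel/dma/direct.c. */

static inline bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	/* Non-blocking allocations cannot remap or wait, so they must
	 * come from the pre-populated atomic pools, unless a per-device
	 * swiotlb pool serves this device. */
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

The failure path of dma_set_encrypted() matches the "always leak memory that can't be re-encrypted" commit above: if memory cannot be flipped back to encrypted, handing it back to the page allocator would be unsafe, so it is deliberately leaked.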
@@ -69,10 +69,27 @@ struct sg_append_table {
  * a valid sg entry, or whether it points to the start of a new scatterlist.
  * Those low bits are there for everyone! (thanks mason :-)
  */
-#define sg_is_chain(sg)		((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg)		((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg)	\
-	((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+	return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+	return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+	return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+	return __sg_flags(sg) & SG_END;
+}
 
 /**
  * sg_assign_page - Assign a given page to an SG entry
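As an aside on the "low bits" comment in the hunk above: because a struct page pointer is at least 32-bit aligned, its two low bits are always zero and can carry the SG_CHAIN/SG_END flags. A minimal, self-contained user-space sketch of the trick (simplified stand-in types, not kernel code):

#include <assert.h>
#include <stdio.h>

/* User-space stand-ins for the kernel definitions; illustration only. */
#define SG_CHAIN 0x01UL
#define SG_END   0x02UL
#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)

struct page { int dummy; };

struct scatterlist {
	unsigned long page_link;	/* page pointer with two flag bits stolen */
};

static struct page *sg_page(struct scatterlist *sg)
{
	return (struct page *)(sg->page_link & ~SG_PAGE_LINK_MASK);
}

int main(void)
{
	static struct page pg;		/* any real page pointer is >= 32-bit aligned */
	struct scatterlist sg;

	/* Alignment guarantees the two low bits of the pointer are free. */
	assert(((unsigned long)&pg & SG_PAGE_LINK_MASK) == 0);

	sg.page_link = (unsigned long)&pg | SG_END;	/* mark the last entry */

	printf("page recovered: %d, is_last: %s\n",
	       sg_page(&sg) == &pg, (sg.page_link & SG_END) ? "yes" : "no");
	return 0;
}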
@@ -92,7 +109,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
 	 * In order for the low bit stealing approach to work, pages
 	 * must be aligned at a 32-bit boundary as a minimum.
 	 */
-	BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+	BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sg_is_chain(sg));
 #endif
@@ -126,7 +143,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sg_is_chain(sg));
 #endif
-	return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+	return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
 }
 
 /**
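For context on how these predicates are consumed elsewhere in the kernel, sg_next() in lib/scatterlist.c walks a table using the end and chain bits. Quoted from memory of the 5.17-era source, so treat it as illustrative:

struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}

A chain entry is not a real segment: its low bit marks it as a link, and sg_chain_ptr() strips the flag bits to recover the pointer to the next scatterlist array, which is exactly the logic the new inline helpers spell out.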
(The rest of the diff, covering the dma-direct changes listed above, is collapsed.)