Commit c9b6180d authored by Christoph Hellwig

swiotlb-xen: use the same foreign page check everywhere

xen_dma_map_page uses a different and more complicated check for foreign
pages than the other three cache maintenance helpers.  Switch it to the
simpler pfn_valid method as well, and document the scheme with a single
improved comment in xen_dma_map_page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
parent 922659ea
@@ -53,23 +53,17 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 		dma_addr_t dev_addr, unsigned long offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
+	unsigned long pfn = PFN_DOWN(dev_addr);
 
 	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
+	 * Dom0 is mapped 1:1, and while the Linux page can span across multiple
+	 * Xen pages, it is not possible for it to contain a mix of local and
+	 * foreign Xen pages.  Calling pfn_valid on a foreign mfn will always
+	 * return false, so if pfn_valid returns true the pages is local and we
+	 * can use the native dma-direct functions, otherwise we call the Xen
+	 * specific version.
 	 */
-	if (local)
+	if (pfn_valid(pfn))
 		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
 	else
 		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
@@ -79,14 +73,7 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
+
 	if (pfn_valid(pfn))
 		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
 	else
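
For readers following the change outside the kernel tree, below is a minimal standalone sketch of the dispatch that both helpers now share: take the pfn for the DMA address, treat a pfn that pfn_valid accepts as a local page (native dma-direct path), and treat anything else as a foreign, grant-mapped page (Xen-specific path). This is not kernel code: mock_pfn_valid, the hard-coded pfn range, and the other mock_* helpers are invented stand-ins for illustration only; only the shape of the check mirrors the diff above.

/* Standalone userspace illustration of the shared "is this pfn local?" check.
 * All functions here are stubs invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define MOCK_PFN_DOWN(addr) ((addr) >> PAGE_SHIFT)

/* Stand-in for pfn_valid(): pretend the local domain's RAM covers pfns [0, 0x1000). */
static bool mock_pfn_valid(unsigned long pfn)
{
	return pfn < 0x1000;
}

static void mock_dma_direct_map(uint64_t dev_addr)
{
	printf("0x%llx: local page, native dma-direct path\n",
	       (unsigned long long)dev_addr);
}

static void mock_xen_map(uint64_t dev_addr)
{
	printf("0x%llx: foreign grant-mapped page, Xen-specific path\n",
	       (unsigned long long)dev_addr);
}

/* Same pattern xen_dma_map_page and xen_dma_unmap_page now both use. */
static void mock_xen_dma_map(uint64_t dev_addr)
{
	unsigned long pfn = MOCK_PFN_DOWN(dev_addr);

	if (mock_pfn_valid(pfn))
		mock_dma_direct_map(dev_addr);
	else
		mock_xen_map(dev_addr);
}

int main(void)
{
	mock_xen_dma_map(0x0042000);   /* pfn 0x42, inside the mock local range */
	mock_xen_dma_map(0x8000000);   /* pfn 0x8000, outside -> treated as foreign */
	return 0;
}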