Commit 9bdc7304 authored by Oleksandr Andrushchenko's avatar Oleksandr Andrushchenko Committed by Boris Ostrovsky

xen/grant-table: Allow allocating buffers suitable for DMA

Extend grant table module API to allow allocating buffers that can
be used for DMA operations and mapping foreign grant references
on top of those.
The resulting buffer is similar to the one allocated by the balloon
driver in that proper memory reservation is made by
{increase|decrease}_reservation and VA mappings are updated if
needed).
This is useful for sharing foreign buffers with HW drivers which
cannot work with scattered buffers provided by the balloon driver,
but require DMAable memory instead.
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent ae4c51a5
...@@ -161,6 +161,20 @@ config XEN_GRANT_DEV_ALLOC ...@@ -161,6 +161,20 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel. or as part of an inter-domain shared memory channel.
config XEN_GRANT_DMA_ALLOC
	bool "Allow allocating DMA capable buffers with grant reference module"
	depends on XEN && HAS_DMA
	help
	  Extends grant table module API to allow allocating DMA capable
	  buffers and mapping foreign grant references on top of it.
	  The resulting buffer is similar to one allocated by the balloon
	  driver in that proper memory reservation is made by
	  {increase|decrease}_reservation and VA mappings are updated if
	  needed.
	  This is useful for sharing foreign buffers with HW drivers which
	  cannot work with scattered buffers provided by the balloon driver,
	  but require DMAable memory instead.
config SWIOTLB_XEN config SWIOTLB_XEN
def_bool y def_bool y
select SWIOTLB select SWIOTLB
......
...@@ -45,6 +45,9 @@ ...@@ -45,6 +45,9 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif
#include <xen/xen.h> #include <xen/xen.h>
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
...@@ -57,6 +60,7 @@ ...@@ -57,6 +60,7 @@
#ifdef CONFIG_X86 #ifdef CONFIG_X86
#include <asm/xen/cpuid.h> #include <asm/xen/cpuid.h>
#endif #endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h> #include <asm/xen/hypercall.h>
#include <asm/xen/interface.h> #include <asm/xen/interface.h>
...@@ -838,6 +842,99 @@ void gnttab_free_pages(int nr_pages, struct page **pages) ...@@ -838,6 +842,99 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
} }
EXPORT_SYMBOL_GPL(gnttab_free_pages); EXPORT_SYMBOL_GPL(gnttab_free_pages);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 *
 * Allocates a contiguous DMA-capable buffer through the device's DMA API
 * (coherent or write-combine, depending on @args->coherent) and then gives
 * the backing frames back to Xen with a decrease_reservation, so that
 * foreign grant references can later be mapped on top of them.
 *
 * Return: 0 on success; -ENOMEM if the DMA allocation fails; -EFAULT if
 * Xen does not accept the full reservation decrease; or the negative
 * error returned by gnttab_pages_set_private(). On any failure after the
 * DMA allocation, the buffer is torn down via gnttab_dma_free_pages().
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
unsigned long pfn, start_pfn;
size_t size;
int i, ret;
size = args->nr_pages << PAGE_SHIFT;
/*
 * Contiguous allocation via the DMA API: cache-coherent or
 * write-combine, as requested by the caller.
 */
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
&args->dev_bus_addr,
GFP_KERNEL | __GFP_NOWARN);
else
args->vaddr = dma_alloc_wc(args->dev, size,
&args->dev_bus_addr,
GFP_KERNEL | __GFP_NOWARN);
if (!args->vaddr) {
pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
return -ENOMEM;
}
/*
 * NOTE(review): the device bus address is converted as if it were a
 * physical address — this assumes a 1:1 bus/phys mapping on the
 * platforms this runs on; confirm for non-identity IOMMU setups.
 */
start_pfn = __phys_to_pfn(args->dev_bus_addr);
for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
pfn++, i++) {
struct page *page = pfn_to_page(pfn);
args->pages[i] = page;
args->frames[i] = xen_page_to_gfn(page);
/* Scrub contents before handing the frames back to Xen. */
xenmem_reservation_scrub_page(page);
}
/*
 * Drop the kernel VA mappings for the pages, then return the frames
 * to Xen; the now-empty frames can be re-populated with foreign
 * grant mappings by the caller.
 */
xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
pr_debug("Failed to decrease reservation for DMA buffer\n");
ret = -EFAULT;
goto fail;
}
ret = gnttab_pages_set_private(args->nr_pages, args->pages);
if (ret < 0)
goto fail;
return 0;
fail:
/* Undoes everything above, including the DMA allocation. */
gnttab_dma_free_pages(args);
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 *
 * Reverses gnttab_dma_alloc_pages(): clears the private page state,
 * re-populates the frames via increase_reservation, restores the kernel
 * VA mappings and finally releases the buffer through the DMA API
 * (coherent or write-combine, matching how it was allocated).
 *
 * Return: 0 on success, -EFAULT if Xen could not re-populate all frames.
 * The VA mapping update and DMA free are performed even on failure so
 * the buffer itself is always released.
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	/* Re-populate the frames before restoring the VA mappings. */
	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		/* Fixed copy-paste: this is the increase path, not decrease. */
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif
/* Handling of paged out grant targets (GNTST_eagain) */ /* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256 #define MAX_DELAY 256
static inline void static inline void
......
...@@ -198,6 +198,24 @@ void gnttab_free_auto_xlat_frames(void); ...@@ -198,6 +198,24 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages); int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages); void gnttab_free_pages(int nr_pages, struct page **pages);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/*
 * Argument bundle for gnttab_dma_alloc_pages()/gnttab_dma_free_pages().
 * The caller fills in dev, coherent, nr_pages and provides the pages and
 * frames arrays; the allocator populates pages, frames, vaddr and
 * dev_bus_addr, which must be preserved unchanged for the free call.
 */
struct gnttab_dma_alloc_args {
/* Device for which DMA memory will be/was allocated. */
struct device *dev;
/* If set then DMA buffer is coherent and write-combine otherwise. */
bool coherent;
/* Number of pages in the buffer (buffer size is nr_pages << PAGE_SHIFT). */
int nr_pages;
/* Caller-provided array; filled with the allocated page pointers. */
struct page **pages;
/* Caller-provided array; filled with the corresponding Xen frames. */
xen_pfn_t *frames;
/* Out: kernel virtual address of the allocated buffer. */
void *vaddr;
/* Out: bus address returned by the DMA API for this buffer. */
dma_addr_t dev_bus_addr;
};
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif
int gnttab_pages_set_private(int nr_pages, struct page **pages); int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages); void gnttab_pages_clear_private(int nr_pages, struct page **pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment