Commit 6c77ed22 authored by Xie Yongji's avatar Xie Yongji Committed by Michael S. Tsirkin

vduse: Support using userspace pages as bounce buffer

Introduce two APIs: vduse_domain_add_user_bounce_pages()
and vduse_domain_remove_user_bounce_pages() to support
adding and removing userspace pages for bounce buffers.
During adding and removing, the DMA data would be copied
from the kernel bounce pages to the userspace bounce pages
and back.
Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20220803045523.23851-4-xieyongji@bytedance.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 82eb46f9
...@@ -178,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain, ...@@ -178,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR)) map->orig_phys == INVALID_PHYS_ADDR))
return; return;
addr = page_address(map->bounce_page) + offset; addr = kmap_local_page(map->bounce_page);
do_bounce(map->orig_phys + offset, addr, sz, dir); do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
kunmap_local(addr);
size -= sz; size -= sz;
iova += sz; iova += sz;
} }
...@@ -210,20 +211,23 @@ static struct page * ...@@ -210,20 +211,23 @@ static struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova) vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
{ {
struct vduse_bounce_map *map; struct vduse_bounce_map *map;
struct page *page; struct page *page = NULL;
read_lock(&domain->bounce_lock);
map = &domain->bounce_maps[iova >> PAGE_SHIFT]; map = &domain->bounce_maps[iova >> PAGE_SHIFT];
if (!map->bounce_page) if (domain->user_bounce_pages || !map->bounce_page)
return NULL; goto out;
page = map->bounce_page; page = map->bounce_page;
get_page(page); get_page(page);
out:
read_unlock(&domain->bounce_lock);
return page; return page;
} }
static void static void
vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain) vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{ {
struct vduse_bounce_map *map; struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns; unsigned long pfn, bounce_pfns;
...@@ -243,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain) ...@@ -243,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
} }
} }
/*
 * Install caller-supplied userspace pages as the domain's bounce
 * buffer, replacing any kernel bounce pages already present and
 * migrating in-flight DMA data into the new pages.
 *
 * Returns 0 on success, -EINVAL if @count does not cover the whole
 * bounce region (partial mappings are unsupported), or -EEXIST if
 * userspace bounce pages are already installed.
 */
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
				       struct page **pages, int count)
{
	struct vduse_bounce_map *map;
	int ret = 0, i;

	/* The caller must supply exactly one page per bounce slot. */
	if (count != (domain->bounce_size >> PAGE_SHIFT))
		return -EINVAL;

	write_lock(&domain->bounce_lock);
	if (domain->user_bounce_pages) {
		ret = -EEXIST;
		goto unlock;
	}

	for (i = 0; i < count; i++) {
		map = &domain->bounce_maps[i];
		if (map->bounce_page) {
			/*
			 * Preserve live DMA data: copy the kernel page's
			 * contents into the user page before freeing it.
			 */
			if (map->orig_phys != INVALID_PHYS_ADDR)
				memcpy_to_page(pages[i], 0,
					       page_address(map->bounce_page),
					       PAGE_SIZE);
			__free_page(map->bounce_page);
		}
		/* Hold a reference for the lifetime of the mapping. */
		get_page(pages[i]);
		map->bounce_page = pages[i];
	}
	domain->user_bounce_pages = true;
unlock:
	write_unlock(&domain->bounce_lock);

	return ret;
}
/*
 * Tear down userspace bounce pages and fall back to kernel-allocated
 * pages, copying back any DMA data that is still in flight.  No-op if
 * userspace bounce pages were never installed.
 */
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long i, count;

	write_lock(&domain->bounce_lock);
	if (!domain->user_bounce_pages)
		goto out;

	count = domain->bounce_size >> PAGE_SHIFT;
	for (i = 0; i < count; i++) {
		/* NULL means this slot reverts to "no bounce page". */
		struct page *page = NULL;

		map = &domain->bounce_maps[i];
		/* Every slot was populated when user pages were added. */
		if (WARN_ON(!map->bounce_page))
			continue;

		/* Copy user page to kernel page if it's in use */
		if (map->orig_phys != INVALID_PHYS_ADDR) {
			/*
			 * NOTE(review): GFP_ATOMIC | __GFP_NOFAIL is a
			 * questionable combination -- the allocator cannot
			 * honor __GFP_NOFAIL without being allowed to
			 * sleep, and we hold a write lock here.  Confirm
			 * against the page allocator's documented GFP
			 * constraints.
			 */
			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
			memcpy_from_page(page_address(page),
					 map->bounce_page, 0, PAGE_SIZE);
		}
		/* Drop the reference taken when the user page was added. */
		put_page(map->bounce_page);
		map->bounce_page = page;
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
}
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain) void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{ {
if (!domain->bounce_map) if (!domain->bounce_map)
...@@ -318,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain, ...@@ -318,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_init_bounce_map(domain)) if (vduse_domain_init_bounce_map(domain))
goto err; goto err;
read_lock(&domain->bounce_lock);
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa)) if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
goto err; goto err_unlock;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE); vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
read_unlock(&domain->bounce_lock);
return iova; return iova;
err_unlock:
read_unlock(&domain->bounce_lock);
err: err:
vduse_domain_free_iova(iovad, iova, size); vduse_domain_free_iova(iovad, iova, size);
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
...@@ -336,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain, ...@@ -336,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
{ {
struct iova_domain *iovad = &domain->stream_iovad; struct iova_domain *iovad = &domain->stream_iovad;
read_lock(&domain->bounce_lock);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE); vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size); vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
read_unlock(&domain->bounce_lock);
vduse_domain_free_iova(iovad, dma_addr, size); vduse_domain_free_iova(iovad, dma_addr, size);
} }
...@@ -447,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file) ...@@ -447,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
spin_lock(&domain->iotlb_lock); spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, 0, ULLONG_MAX); vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
vduse_domain_free_bounce_pages(domain); vduse_domain_remove_user_bounce_pages(domain);
vduse_domain_free_kernel_bounce_pages(domain);
spin_unlock(&domain->iotlb_lock); spin_unlock(&domain->iotlb_lock);
put_iova_domain(&domain->stream_iovad); put_iova_domain(&domain->stream_iovad);
put_iova_domain(&domain->consistent_iovad); put_iova_domain(&domain->consistent_iovad);
...@@ -507,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size) ...@@ -507,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
goto err_file; goto err_file;
domain->file = file; domain->file = file;
rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock); spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad, init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN); PAGE_SIZE, IOVA_START_PFN);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/iova.h> #include <linux/iova.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h> #include <linux/vhost_iotlb.h>
#include <linux/rwlock.h>
#define IOVA_START_PFN 1 #define IOVA_START_PFN 1
...@@ -34,6 +35,8 @@ struct vduse_iova_domain { ...@@ -34,6 +35,8 @@ struct vduse_iova_domain {
struct vhost_iotlb *iotlb; struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock; spinlock_t iotlb_lock;
struct file *file; struct file *file;
bool user_bounce_pages;
rwlock_t bounce_lock;
}; };
int vduse_domain_set_map(struct vduse_iova_domain *domain, int vduse_domain_set_map(struct vduse_iova_domain *domain,
...@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size, ...@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain); void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
struct page **pages, int count);
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
void vduse_domain_destroy(struct vduse_iova_domain *domain); void vduse_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit, struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment