Commit 31130a16 authored by Linus Torvalds

Merge tag 'for-linus-4.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - add dma-buf functionality to Xen grant table handling

 - fix for booting the kernel as Xen PVH dom0

 - fix for booting the kernel as a Xen PV guest with
   CONFIG_DEBUG_VIRTUAL enabled

 - other minor performance and style fixes

* tag 'for-linus-4.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/balloon: fix balloon initialization for PVH Dom0
  xen: don't use privcmd_call() from xen_mc_flush()
  xen/pv: Call get_cpu_address_sizes to set x86_virt/phys_bits
  xen/biomerge: Use true and false for boolean values
  xen/gntdev: don't dereference a null gntdev_dmabuf on allocation failure
  xen/spinlock: Don't use pvqspinlock if only 1 vCPU
  xen/gntdev: Implement dma-buf import functionality
  xen/gntdev: Implement dma-buf export functionality
  xen/gntdev: Add initial support for dma-buf UAPI
  xen/gntdev: Make private routines/structures accessible
  xen/gntdev: Allow mappings for DMA buffers
  xen/grant-table: Allow allocating buffers suitable for DMA
  xen/balloon: Share common memory reservation routines
  xen/grant-table: Make set/clear page private code shared
parents 1202f4fd 3596924a
@@ -209,7 +209,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 })

 static inline long
-privcmd_call(unsigned call,
+xen_single_call(unsigned int call,
              unsigned long a1, unsigned long a2,
              unsigned long a3, unsigned long a4,
              unsigned long a5)
@@ -217,16 +217,29 @@ privcmd_call(unsigned call,
         __HYPERCALL_DECLS;
         __HYPERCALL_5ARG(a1, a2, a3, a4, a5);

-        stac();
         asm volatile(CALL_NOSPEC
                      : __HYPERCALL_5PARAM
                      : [thunk_target] "a" (&hypercall_page[call])
                      : __HYPERCALL_CLOBBER5);
-        clac();

         return (long)__res;
 }

+static inline long
+privcmd_call(unsigned int call,
+             unsigned long a1, unsigned long a2,
+             unsigned long a3, unsigned long a4,
+             unsigned long a5)
+{
+        long res;
+
+        stac();
+        res = xen_single_call(call, a1, a2, a3, a4, a5);
+        clac();
+
+        return res;
+}
+
 static inline int
 HYPERVISOR_set_trap_table(struct trap_info *table)
 {
...
@@ -905,7 +905,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
         apply_forced_caps(c);
 }

-static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 {
         u32 eax, ebx, ecx, edx;
...
@@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
                             *const __x86_cpu_dev_end[];

 extern void get_cpu_cap(struct cpuinfo_x86 *c);
+extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern u32 get_scattered_cpuid_leaf(unsigned int level,
...
@@ -1256,6 +1256,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
         get_cpu_cap(&boot_cpu_data);
         x86_configure_nx();

+        /* Determine virtual and physical address sizes */
+        get_cpu_address_sizes(&boot_cpu_data);
+
         /* Let's presume PV guests always boot on vCPU with id 0. */
         per_cpu(xen_vcpu_id, 0) = 0;
...
@@ -80,9 +80,9 @@ void xen_mc_flush(void)
                    and just do the call directly. */
                 mc = &b->entries[0];

-                mc->result = privcmd_call(mc->op,
-                                          mc->args[0], mc->args[1], mc->args[2],
-                                          mc->args[3], mc->args[4]);
+                mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
+                                             mc->args[2], mc->args[3],
+                                             mc->args[4]);
                 ret = mc->result < 0;
                 break;
...
@@ -130,6 +130,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
 void __init xen_init_spinlocks(void)
 {
+        /* Don't need to use pvqspinlock code if there is only 1 vCPU. */
+        if (num_possible_cpus() == 1)
+                xen_pvspin = false;
+
         if (!xen_pvspin) {
                 printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
                 return;
...
@@ -152,6 +152,16 @@ config XEN_GNTDEV
         help
           Allows userspace processes to use grants.

+config XEN_GNTDEV_DMABUF
+        bool "Add support for dma-buf grant access device driver extension"
+        depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+        help
+          Allows userspace processes and kernel modules to use the Xen backed
+          dma-buf implementation. With this extension, grant references to
+          the pages of an imported dma-buf can be exported for use by another
+          domain, and grant references coming from a foreign domain can be
+          converted into a local dma-buf for local export.
+
 config XEN_GRANT_DEV_ALLOC
         tristate "User-space grant reference allocator driver"
         depends on XEN
@@ -161,6 +171,20 @@ config XEN_GRANT_DEV_ALLOC
           to other domains. This can be used to implement frontend drivers
           or as part of an inter-domain shared memory channel.

+config XEN_GRANT_DMA_ALLOC
+        bool "Allow allocating DMA capable buffers with grant reference module"
+        depends on XEN && HAS_DMA
+        help
+          Extends the grant table module API to allow allocating DMA capable
+          buffers and mapping foreign grant references on top of them.
+          The resulting buffer is similar to one allocated by the balloon
+          driver in that proper memory reservation is made
+          ({increase|decrease}_reservation) and VA mappings are updated if
+          needed.
+          This is useful for sharing foreign buffers with HW drivers which
+          cannot work with scattered buffers provided by the balloon driver,
+          but require DMAable memory instead.
+
 config SWIOTLB_XEN
         def_bool y
         select SWIOTLB
...
@@ -2,6 +2,7 @@
 obj-$(CONFIG_HOTPLUG_CPU)               += cpu_hotplug.o
 obj-$(CONFIG_X86)                       += fallback.o
 obj-y   += grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y   += mem-reservation.o
 obj-y   += events/
 obj-y   += xenbus/
@@ -40,5 +41,6 @@ obj-$(CONFIG_XEN_PVCALLS_BACKEND)       += pvcalls-back.o
 obj-$(CONFIG_XEN_PVCALLS_FRONTEND)      += pvcalls-front.o
 xen-evtchn-y                            := evtchn.o
 xen-gntdev-y                            := gntdev.o
+xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF)  += gntdev-dmabuf.o
 xen-gntalloc-y                          := gntalloc.o
 xen-privcmd-y                           := privcmd.o privcmd-buf.o
@@ -71,6 +71,7 @@
 #include <xen/balloon.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>

 static int xen_hotplug_unpopulated;
@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 #define GFP_BALLOON \
         (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
-        clear_highpage(page);
-#endif
-}
-
 /* balloon_append: add the given page to the balloon. */
 static void __balloon_append(struct page *page)
 {
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
         int rc;
         unsigned long i;
         struct page *page;
-        struct xen_memory_reservation reservation = {
-                .address_bits = 0,
-                .extent_order = EXTENT_ORDER,
-                .domid        = DOMID_SELF
-        };

         if (nr_pages > ARRAY_SIZE(frame_list))
                 nr_pages = ARRAY_SIZE(frame_list);
@@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                         break;
                 }

-                /* XENMEM_populate_physmap requires a PFN based on Xen
-                 * granularity.
-                 */
                 frame_list[i] = page_to_xen_pfn(page);
                 page = balloon_next_page(page);
         }

-        set_xen_guest_handle(reservation.extent_start, frame_list);
-        reservation.nr_extents = nr_pages;
-        rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+        rc = xenmem_reservation_increase(nr_pages, frame_list);
         if (rc <= 0)
                 return BP_EAGAIN;
@@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                 page = balloon_retrieve(false);
                 BUG_ON(page == NULL);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-                /*
-                 * We don't support PV MMU when Linux and Xen is using
-                 * different page granularity.
-                 */
-                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                        unsigned long pfn = page_to_pfn(page);
-
-                        set_phys_to_machine(pfn, frame_list[i]);
-
-                        /* Link back into the page tables if not highmem. */
-                        if (!PageHighMem(page)) {
-                                int ret;
-                                ret = HYPERVISOR_update_va_mapping(
-                                                (unsigned long)__va(pfn << PAGE_SHIFT),
-                                                mfn_pte(frame_list[i], PAGE_KERNEL),
-                                                0);
-                                BUG_ON(ret);
-                        }
-                }
-#endif
+                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

                 /* Relinquish the page back to the allocator. */
                 free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
         unsigned long i;
         struct page *page, *tmp;
         int ret;
-        struct xen_memory_reservation reservation = {
-                .address_bits = 0,
-                .extent_order = EXTENT_ORDER,
-                .domid        = DOMID_SELF
-        };
         LIST_HEAD(pages);

         if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                         break;
                 }
                 adjust_managed_page_count(page, -1);
-                scrub_page(page);
+                xenmem_reservation_scrub_page(page);
                 list_add(&page->lru, &pages);
         }
@@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
          */
         i = 0;
         list_for_each_entry_safe(page, tmp, &pages, lru) {
-                /* XENMEM_decrease_reservation requires a GFN */
                 frame_list[i++] = xen_page_to_gfn(page);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-                /*
-                 * We don't support PV MMU when Linux and Xen is using
-                 * different page granularity.
-                 */
-                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                        unsigned long pfn = page_to_pfn(page);
-
-                        if (!PageHighMem(page)) {
-                                ret = HYPERVISOR_update_va_mapping(
-                                                (unsigned long)__va(pfn << PAGE_SHIFT),
-                                                __pte_ma(0), 0);
-                                BUG_ON(ret);
-                        }
-                        __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-                }
-#endif
+                xenmem_reservation_va_mapping_reset(1, &page);

                 list_del(&page->lru);

                 balloon_append(page);
@@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)

         flush_tlb_all();

-        set_xen_guest_handle(reservation.extent_start, frame_list);
-        reservation.nr_extents = nr_pages;
-        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+        ret = xenmem_reservation_decrease(nr_pages, frame_list);
         BUG_ON(ret != nr_pages);

         balloon_stats.current_pages -= nr_pages;
...
@@ -17,7 +17,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
          * XXX: Add support for merging bio_vec when using different page
          * size in Xen and Linux.
          */
-        return 0;
+        return false;
 #endif
 }
 EXPORT_SYMBOL(xen_biovec_phys_mergeable);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common functionality of grant device.
*
* Copyright (c) 2006-2007, D G Murray.
* (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
* (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#ifndef _GNTDEV_COMMON_H
#define _GNTDEV_COMMON_H
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
struct gntdev_dmabuf_priv;
struct gntdev_priv {
/* Maps with visible offsets in the file descriptor. */
struct list_head maps;
/*
* Maps that are not visible; will be freed on munmap.
* Only populated if populate_freeable_maps == 1
*/
struct list_head freeable_maps;
/* lock protects maps and freeable_maps. */
struct mutex lock;
struct mm_struct *mm;
struct mmu_notifier mn;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Device for which DMA memory is allocated. */
struct device *dma_dev;
#endif
#ifdef CONFIG_XEN_GNTDEV_DMABUF
struct gntdev_dmabuf_priv *dmabuf_priv;
#endif
};
struct gntdev_unmap_notify {
int flags;
/* Address relative to the start of the gntdev_grant_map. */
int addr;
int event;
};
struct gntdev_grant_map {
struct list_head next;
struct vm_area_struct *vma;
int index;
int count;
int flags;
refcount_t users;
struct gntdev_unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
struct page **pages;
unsigned long pages_vm_start;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/*
* If dmabuf_vaddr is not NULL then this mapping is backed by DMA
* capable memory.
*/
struct device *dma_dev;
/* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
int dma_flags;
void *dma_vaddr;
dma_addr_t dma_bus_addr;
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
int dma_flags);
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
bool gntdev_account_mapped_pages(int count);
int gntdev_map_grant_pages(struct gntdev_grant_map *map);
#endif
This diff is collapsed.
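The declarations above are what the rest of the gntdev rework builds on (the larger gntdev.c and gntdev-dmabuf.c diffs are collapsed in this view). As a rough, hypothetical sketch only, not code from this series, an extension could combine the now-exported helpers roughly like this; the function name and error handling are illustrative:

/* Hypothetical sketch -- not part of this commit. */
#include <linux/err.h>

#include "gntdev-common.h"

static struct gntdev_grant_map *
example_create_map(struct gntdev_priv *priv, u32 domid, const u32 *refs,
                   int count, int dma_flags)
{
        struct gntdev_grant_map *map;
        int i;

        map = gntdev_alloc_map(priv, count, dma_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        if (!gntdev_account_mapped_pages(count)) {
                /* Not yet on priv->maps, so there is nothing to unlink. */
                gntdev_put_map(NULL, map);
                return ERR_PTR(-ENOMEM);
        }

        /* Fill in the grant references to be mapped later. */
        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        gntdev_add_map(priv, map);
        return map;
}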
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xen dma-buf functionality for gntdev.
*
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#ifndef _GNTDEV_DMABUF_H
#define _GNTDEV_DMABUF_H
#include <xen/gntdev.h>
struct gntdev_dmabuf_priv;
struct gntdev_priv;
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
struct ioctl_gntdev_dmabuf_exp_from_refs __user *u);
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_exp_wait_released __user *u);
long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_imp_to_refs __user *u);
long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_imp_release __user *u);
#endif
This diff is collapsed.
@@ -45,6 +45,9 @@
 #include <linux/workqueue.h>
 #include <linux/ratelimit.h>
 #include <linux/moduleparam.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/dma-mapping.h>
+#endif

 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -57,6 +60,7 @@
 #ifdef CONFIG_X86
 #include <asm/xen/cpuid.h>
 #endif
+#include <xen/mem-reservation.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
@@ -769,29 +773,18 @@ void gnttab_free_auto_xlat_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

-/**
- * gnttab_alloc_pages - alloc pages suitable for grant mapping into
- * @nr_pages: number of pages to alloc
- * @pages: returns the pages
- */
-int gnttab_alloc_pages(int nr_pages, struct page **pages)
+int gnttab_pages_set_private(int nr_pages, struct page **pages)
 {
         int i;
-        int ret;
-
-        ret = alloc_xenballooned_pages(nr_pages, pages);
-        if (ret < 0)
-                return ret;

         for (i = 0; i < nr_pages; i++) {
 #if BITS_PER_LONG < 64
                 struct xen_page_foreign *foreign;

                 foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
-                if (!foreign) {
-                        gnttab_free_pages(nr_pages, pages);
+                if (!foreign)
                         return -ENOMEM;
-                }
+
                 set_page_private(pages[i], (unsigned long)foreign);
 #endif
                 SetPagePrivate(pages[i]);
@@ -799,14 +792,30 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)

         return 0;
 }
-EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

 /**
- * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
- * @nr_pages; number of pages to free
- * @pages: the pages
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
  */
-void gnttab_free_pages(int nr_pages, struct page **pages)
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+        int ret;
+
+        ret = alloc_xenballooned_pages(nr_pages, pages);
+        if (ret < 0)
+                return ret;
+
+        ret = gnttab_pages_set_private(nr_pages, pages);
+        if (ret < 0)
+                gnttab_free_pages(nr_pages, pages);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+
+void gnttab_pages_clear_private(int nr_pages, struct page **pages)
 {
         int i;
@@ -818,10 +827,114 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
                         ClearPagePrivate(pages[i]);
                 }
         }
+}
+EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages; number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+        gnttab_pages_clear_private(nr_pages, pages);
         free_xenballooned_pages(nr_pages, pages);
 }
 EXPORT_SYMBOL_GPL(gnttab_free_pages);
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
* gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
* @args: arguments to the function
*/
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
unsigned long pfn, start_pfn;
size_t size;
int i, ret;
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
&args->dev_bus_addr,
GFP_KERNEL | __GFP_NOWARN);
else
args->vaddr = dma_alloc_wc(args->dev, size,
&args->dev_bus_addr,
GFP_KERNEL | __GFP_NOWARN);
if (!args->vaddr) {
pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
return -ENOMEM;
}
start_pfn = __phys_to_pfn(args->dev_bus_addr);
for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
pfn++, i++) {
struct page *page = pfn_to_page(pfn);
args->pages[i] = page;
args->frames[i] = xen_page_to_gfn(page);
xenmem_reservation_scrub_page(page);
}
xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
pr_debug("Failed to decrease reservation for DMA buffer\n");
ret = -EFAULT;
goto fail;
}
ret = gnttab_pages_set_private(args->nr_pages, args->pages);
if (ret < 0)
goto fail;
return 0;
fail:
gnttab_dma_free_pages(args);
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
/**
* gnttab_dma_free_pages - free DMAable pages
* @args: arguments to the function
*/
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
size_t size;
int i, ret;
gnttab_pages_clear_private(args->nr_pages, args->pages);
for (i = 0; i < args->nr_pages; i++)
args->frames[i] = page_to_xen_pfn(args->pages[i]);
ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
pr_debug("Failed to decrease reservation for DMA buffer\n");
ret = -EFAULT;
} else {
ret = 0;
}
xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
args->frames);
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
dma_free_coherent(args->dev, size,
args->vaddr, args->dev_bus_addr);
else
dma_free_wc(args->dev, size,
args->vaddr, args->dev_bus_addr);
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
...
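For orientation, here is a hedged sketch of how a kernel caller might drive the two new entry points; the function name, device pointer, and array handling are illustrative only and not taken from this series:

/* Hypothetical usage sketch -- not part of this commit. */
#include <linux/slab.h>
#include <xen/grant_table.h>

static int example_dma_buffer_roundtrip(struct device *dev, int nr_pages)
{
        struct gnttab_dma_alloc_args args = { 0 };
        int ret;

        args.dev = dev;
        args.coherent = true;           /* false requests write-combine */
        args.nr_pages = nr_pages;
        args.pages = kcalloc(nr_pages, sizeof(*args.pages), GFP_KERNEL);
        args.frames = kcalloc(nr_pages, sizeof(*args.frames), GFP_KERNEL);
        if (!args.pages || !args.frames) {
                ret = -ENOMEM;
                goto out;
        }

        ret = gnttab_dma_alloc_pages(&args);
        if (ret)
                goto out;

        /*
         * args.vaddr and args.dev_bus_addr now describe a DMA-capable
         * buffer whose args.pages[] can be granted to another domain,
         * e.g. via gnttab_grant_foreign_access().
         */

        ret = gnttab_dma_free_pages(&args);
out:
        kfree(args.pages);
        kfree(args.frames);
        return ret;
}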
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
*/
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
int i;
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
BUG_ON(!page);
/*
* We don't support PV MMU when Linux and Xen is using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
set_phys_to_machine(pfn, frames[i]);
/* Link back into the page tables if not highmem. */
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(frames[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
int i;
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */
/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
/* XENMEM_decrease_reservation requires a GFN */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
                         static_max = new_target;
                 else
                         static_max >>= PAGE_SHIFT - 10;
-                target_diff = xen_pv_domain() ? 0
+                target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
                             : static_max - balloon_stats.target_pages;
         }
...
@@ -5,6 +5,7 @@
  * Interface to /dev/xen/gntdev.
  *
  * Copyright (c) 2007, D G Murray
+ * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
@@ -200,4 +201,109 @@ struct ioctl_gntdev_grant_copy {

 /* Send an interrupt on the indicated event channel */
 #define UNMAP_NOTIFY_SEND_EVENT 0x2
/*
* Flags to be used while requesting memory mapping's backing storage
* to be allocated with DMA API.
*/
/*
* The buffer is backed with memory allocated with dma_alloc_wc.
*/
#define GNTDEV_DMA_FLAG_WC (1 << 0)
/*
* The buffer is backed with memory allocated with dma_alloc_coherent.
*/
#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
/*
* Create a dma-buf [1] from grant references @refs of count @count provided
* by the foreign domain @domid with flags @flags.
*
* By default dma-buf is backed by system memory pages, but by providing
* one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
* a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/
* dma_alloc_coherent.
*
* Returns 0 if dma-buf was successfully created and the corresponding
* dma-buf's file descriptor is returned in @fd.
*
* [1] Documentation/driver-api/dma-buf.rst
*/
#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \
_IOC(_IOC_NONE, 'G', 9, \
sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs))
struct ioctl_gntdev_dmabuf_exp_from_refs {
/* IN parameters. */
/* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
__u32 flags;
/* Number of grant references in @refs array. */
__u32 count;
/* OUT parameters. */
/* File descriptor of the dma-buf. */
__u32 fd;
/* The domain ID of the grant references to be mapped. */
__u32 domid;
/* Variable IN parameter. */
/* Array of grant references of size @count. */
__u32 refs[1];
};
/*
* This will block until the dma-buf with the file descriptor @fd is
* released. This is only valid for buffers created with
* IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
*
* If within @wait_to_ms milliseconds the buffer is not released
* then -ETIMEDOUT error is returned.
* If the buffer with the file descriptor @fd does not exist or has already
* been released, then -ENOENT is returned. For valid file descriptors
* this must not be treated as error.
*/
#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \
_IOC(_IOC_NONE, 'G', 10, \
sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released))
struct ioctl_gntdev_dmabuf_exp_wait_released {
/* IN parameters */
__u32 fd;
__u32 wait_to_ms;
};
/*
* Import a dma-buf with file descriptor @fd and export granted references
* to the pages of that dma-buf into array @refs of size @count.
*/
#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \
_IOC(_IOC_NONE, 'G', 11, \
sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs))
struct ioctl_gntdev_dmabuf_imp_to_refs {
/* IN parameters. */
/* File descriptor of the dma-buf. */
__u32 fd;
/* Number of grant references in @refs array. */
__u32 count;
/* The domain ID for which references to be granted. */
__u32 domid;
/* Reserved - must be zero. */
__u32 reserved;
/* OUT parameters. */
/* Array of grant references of size @count. */
__u32 refs[1];
};
/*
* This will close all references to the imported buffer with file descriptor
* @fd, so it can be released by the owner. This is only valid for buffers
* created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
*/
#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \
_IOC(_IOC_NONE, 'G', 12, \
sizeof(struct ioctl_gntdev_dmabuf_imp_release))
struct ioctl_gntdev_dmabuf_imp_release {
/* IN parameters */
__u32 fd;
__u32 reserved;
};
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
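To make the new UAPI concrete, a hedged user-space sketch of the export path follows. It assumes the installed UAPI header is reachable as <xen/gntdev.h> and uses the /dev/xen/gntdev node named at the top of the header; the function name, sizing of the variable-length refs[] array, and error handling are illustrative only:

/* Hypothetical user-space sketch -- not part of this commit. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>

/* Returns a dma-buf fd on success, -1 on failure; count must be non-zero. */
static int export_refs_as_dmabuf(uint32_t domid, const uint32_t *refs,
                                 uint32_t count)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs *op;
        size_t sz = sizeof(*op) + (count - 1) * sizeof(op->refs[0]);
        int gntdev_fd, dmabuf_fd = -1;

        gntdev_fd = open("/dev/xen/gntdev", O_RDWR);
        if (gntdev_fd < 0)
                return -1;

        op = calloc(1, sz);
        if (!op)
                goto out;

        op->flags = GNTDEV_DMA_FLAG_WC; /* or GNTDEV_DMA_FLAG_COHERENT */
        op->count = count;
        op->domid = domid;
        memcpy(op->refs, refs, count * sizeof(op->refs[0]));

        if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
                dmabuf_fd = op->fd;     /* hand this fd to the consumer */

        free(op);
out:
        close(gntdev_fd);
        return dmabuf_fd;
}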
@@ -198,6 +198,27 @@ void gnttab_free_auto_xlat_frames(void);
 int gnttab_alloc_pages(int nr_pages, struct page **pages);
 void gnttab_free_pages(int nr_pages, struct page **pages);

+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+struct gnttab_dma_alloc_args {
+        /* Device for which DMA memory will be/was allocated. */
+        struct device *dev;
+        /* If set then DMA buffer is coherent and write-combine otherwise. */
+        bool coherent;
+
+        int nr_pages;
+        struct page **pages;
+        xen_pfn_t *frames;
+        void *vaddr;
+        dma_addr_t dev_bus_addr;
+};
+
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
+#endif
+
+int gnttab_pages_set_private(int nr_pages, struct page **pages);
+void gnttab_pages_clear_private(int nr_pages, struct page **pages);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                     struct gnttab_map_grant_ref *kmap_ops,
                     struct page **pages, unsigned int count);
...
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H
#include <linux/highmem.h>
#include <xen/page.h>
static inline void xenmem_reservation_scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
clear_highpage(page);
#endif
}
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames);
void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages);
#endif
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}
int xenmem_reservation_increase(int count, xen_pfn_t *frames);
int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
#endif
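As a quick illustration of the pattern these helpers capture (essentially the per-page work that decrease_reservation() in balloon.c now delegates), here is a hedged sketch; the function name is hypothetical:

/* Hypothetical sketch -- not part of this commit. */
#include <xen/mem-reservation.h>
#include <xen/page.h>

static int example_return_page_to_xen(struct page *page)
{
        xen_pfn_t frame;

        /* Optionally wipe the page contents before handing it back. */
        xenmem_reservation_scrub_page(page);

        /* XENMEM_decrease_reservation takes GFNs. */
        frame = xen_page_to_gfn(page);

        /* Drop the PV VA mapping / p2m entry; no-op on auto-translated guests. */
        xenmem_reservation_va_mapping_reset(1, &page);

        /* Returns the number of extents released; 1 means success here. */
        if (xenmem_reservation_decrease(1, &frame) != 1)
                return -EFAULT;

        return 0;
}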