Commit ae4c51a5 authored by Oleksandr Andrushchenko, committed by Boris Ostrovsky

xen/balloon: Share common memory reservation routines

Memory {increase|decrease}_reservation and VA mapping update/reset
code used in the balloon driver can be made common, so other drivers
can also reuse the same functionality without open-coding it.
Create a dedicated file for the shared code and export the
corresponding symbols for other kernel modules.
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 8c3799ee
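For readers skimming the diff: the commit moves the XENMEM_{populate_physmap|decrease_reservation} plumbing behind a small xenmem_reservation_* API. A minimal sketch of how another driver might balloon a single page out and back in with the new helpers; the function names my_drv_give_page()/my_drv_take_page() are hypothetical and error handling is reduced to the essentials:

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

/* Hand one page back to Xen; mirrors the decrease_reservation() flow. */
static int my_drv_give_page(struct page *page)
{
	xen_pfn_t frame = xen_page_to_gfn(page);

	xenmem_reservation_scrub_page(page);
	xenmem_reservation_va_mapping_reset(1, &page);
	/* Returns the number of extents actually released; expect 1. */
	return xenmem_reservation_decrease(1, &frame) == 1 ? 0 : -ENOMEM;
}

/* Ask Xen to back @page with a frame again; mirrors increase_reservation(). */
static int my_drv_take_page(struct page *page)
{
	xen_pfn_t frame = page_to_xen_pfn(page);

	if (xenmem_reservation_increase(1, &frame) <= 0)
		return -ENOMEM;
	/* For PV guests, frame now holds the frame to link back in. */
	xenmem_reservation_va_mapping_update(1, &page, &frame);
	return 0;
}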
drivers/xen/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
 obj-$(CONFIG_X86) += fallback.o
 obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y += mem-reservation.o
 obj-y += events/
 obj-y += xenbus/
drivers/xen/balloon.c
@@ -71,6 +71,7 @@
 #include <xen/balloon.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>

 static int xen_hotplug_unpopulated;
@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 #define GFP_BALLOON \
 	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
-}
-
 /* balloon_append: add the given page to the balloon. */
 static void __balloon_append(struct page *page)
 {
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 	int rc;
 	unsigned long i;
 	struct page *page;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};

 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
@@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 			break;
 		}

-		/* XENMEM_populate_physmap requires a PFN based on Xen
-		 * granularity.
-		 */
 		frame_list[i] = page_to_xen_pfn(page);
 		page = balloon_next_page(page);
 	}

-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+	rc = xenmem_reservation_increase(nr_pages, frame_list);
 	if (rc <= 0)
 		return BP_EAGAIN;
@@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		page = balloon_retrieve(false);
 		BUG_ON(page == NULL);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
-
-			set_phys_to_machine(pfn, frame_list[i]);
-
-			/* Link back into the page tables if not highmem. */
-			if (!PageHighMem(page)) {
-				int ret;
-
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						mfn_pte(frame_list[i], PAGE_KERNEL),
-						0);
-				BUG_ON(ret);
-			}
-		}
-#endif
+		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

 		/* Relinquish the page back to the allocator. */
 		free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	unsigned long i;
 	struct page *page, *tmp;
 	int ret;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};
 	LIST_HEAD(pages);

 	if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			break;
 		}
 		adjust_managed_page_count(page, -1);
-		scrub_page(page);
+		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
@@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	 */
 	i = 0;
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
-		/* XENMEM_decrease_reservation requires a GFN */
 		frame_list[i++] = xen_page_to_gfn(page);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
-
-			if (!PageHighMem(page)) {
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						__pte_ma(0), 0);
-				BUG_ON(ret);
-			}
-			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-		}
-#endif
+		xenmem_reservation_va_mapping_reset(1, &page);
 		list_del(&page->lru);

 		balloon_append(page);
@@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	flush_tlb_all();

-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+	ret = xenmem_reservation_decrease(nr_pages, frame_list);
 	BUG_ON(ret != nr_pages);

 	balloon_stats.current_pages -= nr_pages;
drivers/xen/mem-reservation.c (new file)
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
int i;
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
BUG_ON(!page);
/*
 * We don't support PV MMU when Linux and Xen are using
 * different page granularity.
 */
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
set_phys_to_machine(pfn, frames[i]);
/* Link back into the page tables if not highmem. */
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(frames[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
int i;
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularity.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */
/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
/* XENMEM_decrease_reservation requires a GFN */
set_xen_guest_handle(reservation.extent_start, frames);
reservation.nr_extents = count;
return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
include/xen/mem-reservation.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xen memory reservation utilities.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
* Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H
#include <linux/highmem.h>
#include <xen/page.h>
static inline void xenmem_reservation_scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
clear_highpage(page);
#endif
}
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames);
void __xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages);
#endif
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
struct page **pages,
xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
if (!xen_feature(XENFEAT_auto_translated_physmap))
__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}
int xenmem_reservation_increase(int count, xen_pfn_t *frames);
int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
#endif /* _XENMEM_RESERVATION_H */
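A design note on the header above: the non-underscored xenmem_reservation_va_mapping_{update|reset}() wrappers compile to nothing without CONFIG_XEN_HAVE_PVMMU and are runtime no-ops on auto-translated guests, so callers can invoke them unconditionally instead of open-coding the #ifdefs that balloon.c used to carry. A hedged sketch of the batched teardown pattern the balloon driver follows, for a caller-provided page array; give_pages_to_xen() and its parameters are illustrative, not part of this commit:

#include <linux/errno.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

/* Scrub, unmap and release @count pages; modeled on decrease_reservation(). */
static int give_pages_to_xen(struct page **pages, xen_pfn_t *frames, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		xenmem_reservation_scrub_page(pages[i]);
		/* XENMEM_decrease_reservation takes GFNs. */
		frames[i] = xen_page_to_gfn(pages[i]);
	}

	/* No-op unless this is a PV guest with a P2M to maintain. */
	xenmem_reservation_va_mapping_reset(count, pages);

	/* The hypercall returns the number of extents actually released. */
	ret = xenmem_reservation_decrease(count, frames);
	return ret == count ? 0 : -EIO;
}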