Commit 975fac3c authored by Shannon Zhao, committed by David Vrabel

Xen: xlate: Use page_to_xen_pfn instead of page_to_pfn

Make xen_xlate_map_ballooned_pages work with 64K pages. In that case,
kernel pages are 64K in size but Xen pages remain 4K in size, and Xen
pfns refer to the 4K pages.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Julien Grall <julien.grall@arm.com>
Tested-by: Julien Grall <julien.grall@arm.com>
parent 243848fc
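
For context, the core of the fix is page-size arithmetic: with 64K kernel pages, each struct page covers XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE, i.e. 16) of the 4K Xen frames, so the number of kernel pages to balloon out is DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE) rather than nr_grant_frames itself. A minimal sketch of that arithmetic follows; it is not part of the patch, only assumes the DIV_ROUND_UP and XEN_PFN_PER_PAGE definitions from linux/kernel.h and xen/page.h, and the helper name is illustrative:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <xen/page.h>		/* XEN_PFN_PER_PAGE, XEN_PAGE_SIZE */

/* Sketch only: how many kernel pages back nr_grant_frames Xen frames.
 * With 4K kernel pages XEN_PFN_PER_PAGE == 1 and this is a no-op;
 * with 64K kernel pages XEN_PFN_PER_PAGE == 16, so e.g. 5 grant
 * frames fit in a single ballooned kernel page. */
static unsigned long balloon_pages_needed(unsigned long nr_grant_frames)
{
	return DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
}
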
@@ -189,6 +189,18 @@ int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
 
+struct map_balloon_pages {
+	xen_pfn_t *pfns;
+	unsigned int idx;
+};
+
+static void setup_balloon_gfn(unsigned long gfn, void *data)
+{
+	struct map_balloon_pages *info = data;
+
+	info->pfns[info->idx++] = gfn;
+}
+
 /**
  * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
  * @gfns: returns the array of corresponding GFNs
@@ -205,11 +217,13 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 	struct page **pages;
 	xen_pfn_t *pfns;
 	void *vaddr;
+	struct map_balloon_pages data;
 	int rc;
-	unsigned int i;
+	unsigned long nr_pages;
 
 	BUG_ON(nr_grant_frames == 0);
-	pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
+	nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
+	pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
@@ -218,22 +232,24 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 		kfree(pages);
 		return -ENOMEM;
 	}
-	rc = alloc_xenballooned_pages(nr_grant_frames, pages);
+	rc = alloc_xenballooned_pages(nr_pages, pages);
 	if (rc) {
-		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
-			nr_grant_frames, rc);
+		pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
+			nr_pages, rc);
 		kfree(pages);
 		kfree(pfns);
 		return rc;
 	}
-	for (i = 0; i < nr_grant_frames; i++)
-		pfns[i] = page_to_pfn(pages[i]);
-
-	vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL);
+
+	data.pfns = pfns;
+	data.idx = 0;
+	xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
+
+	vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
 	if (!vaddr) {
-		pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
-			nr_grant_frames, rc);
-		free_xenballooned_pages(nr_grant_frames, pages);
+		pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
+			nr_pages, rc);
+		free_xenballooned_pages(nr_pages, pages);
 		kfree(pages);
 		kfree(pfns);
 		return -ENOMEM;
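
The old loop called page_to_pfn() once per kernel page, which with 64K pages yields 64K-granular frame numbers where Xen expects 4K ones. The replacement walks the grant frames with xen_for_each_gfn(), a helper already present in xlate_mmu.c and not shown in this diff, letting setup_balloon_gfn() record one GFN per 4K Xen frame. A rough sketch of that iteration pattern, assuming it follows the usual XEN_PFN_PER_PAGE stepping (the function name below is illustrative; page_to_xen_pfn() and pfn_to_gfn() are the real helpers from xen/page.h):

#include <linux/mm.h>
#include <xen/page.h>	/* page_to_xen_pfn(), pfn_to_gfn(), XEN_PFN_PER_PAGE */

/* Sketch only: visit nr_gfn Xen-sized (4K) frames spread across an array
 * of kernel-sized pages, calling fn() once per GFN.  setup_balloon_gfn()
 * from the patch is one such callback. */
static void for_each_gfn_sketch(struct page **pages, unsigned int nr_gfn,
				void (*fn)(unsigned long gfn, void *data),
				void *data)
{
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_gfn; i++) {
		/* Crossed into the next kernel page: refresh the base Xen PFN. */
		if ((i % XEN_PFN_PER_PAGE) == 0)
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
		fn(pfn_to_gfn(xen_pfn++), data);
	}
}

With 4K kernel pages XEN_PFN_PER_PAGE is 1 and this degenerates to the old per-page behaviour, which is why the change is a no-op on x86 but required for 64K-page ARM64 guests.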