Commit 9a032e39 authored by Ian Campbell

xen: add pages parameter to xen_remap_domain_mfn_range

Also introduce xen_unmap_domain_mfn_range. These are the parts of
Mukesh's "xen/pvh: Implement MMU changes for PVH" which are also
needed as a baseline for ARM privcmd support.

The original patch was:
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

This derivative is also:
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
parent b3e40b72
arch/x86/xen/mmu.c

@@ -2479,7 +2479,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       unsigned long mfn, int nr,
-			       pgprot_t prot, unsigned domid)
+			       pgprot_t prot, unsigned domid,
+			       struct page **pages)
 {
 	struct remap_data rmd;
 	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@@ -2523,3 +2525,14 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+/* Returns: 0 success */
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+			       int numpgs, struct page **pages)
+{
+	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
...@@ -178,7 +178,7 @@ static int mmap_mfn_range(void *data, void *state) ...@@ -178,7 +178,7 @@ static int mmap_mfn_range(void *data, void *state)
msg->va & PAGE_MASK, msg->va & PAGE_MASK,
msg->mfn, msg->npages, msg->mfn, msg->npages,
vma->vm_page_prot, vma->vm_page_prot,
st->domain); st->domain, NULL);
if (rc < 0) if (rc < 0)
return rc; return rc;
...@@ -267,7 +267,8 @@ static int mmap_batch_fn(void *data, void *state) ...@@ -267,7 +267,8 @@ static int mmap_batch_fn(void *data, void *state)
int ret; int ret;
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
st->vma->vm_page_prot, st->domain); st->vma->vm_page_prot, st->domain,
NULL);
/* Store error code for second pass. */ /* Store error code for second pass. */
*(st->err++) = ret; *(st->err++) = ret;
......
include/xen/xen-ops.h

@@ -27,6 +27,9 @@ struct vm_area_struct;
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       unsigned long mfn, int nr,
-			       pgprot_t prot, unsigned domid);
+			       pgprot_t prot, unsigned domid,
+			       struct page **pages);
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+			       int numpgs, struct page **pages);

 #endif /* INCLUDE_XEN_OPS_H */
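
To make the interface change concrete, here is an illustration-only sketch (not part of the commit) of how a caller of the updated functions might look. The example_map_foreign helper and its arguments are hypothetical; as the privcmd hunks above show, existing PV callers simply pass NULL for the new pages argument, while the planned auto-translated (PVH/ARM) paths would supply a real struct page array that the matching unmap call later releases.

#include <linux/mm.h>
#include <xen/xen-ops.h>

/* Illustration-only helper (not from the commit): map nr foreign frames
 * starting at mfn from domain domid into a userspace VMA, then tear the
 * mapping down again with the new unmap call. */
static int example_map_foreign(struct vm_area_struct *vma,
			       unsigned long addr, unsigned long mfn,
			       int nr, unsigned domid,
			       struct page **pages)
{
	int rc;

	/* PV callers pass pages == NULL; auto-translated guests would pass
	 * an array of struct page pointers once PVH/ARM support lands. */
	rc = xen_remap_domain_mfn_range(vma, addr, mfn, nr,
					vma->vm_page_prot, domid, pages);
	if (rc < 0)
		return rc;

	/* ... use the mapping ... */

	return xen_unmap_domain_mfn_range(vma, nr, pages);
}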