Commit d5431d52 authored by Konrad Rzeszutek Wilk's avatar Konrad Rzeszutek Wilk

xen/p2m/m2p/gnttab: Support GNTMAP_host_map in the M2P override.

We supported the M2P (and P2M) override only for the
GNTMAP_contains_pte type mappings, meaning that the grant
operations would "contain the machine address of the PTE to update".
If the flag is unset, then the grant operation is
"contains a host virtual address". The latter case means that
the Hypervisor takes care of updating our page table
(specifically the PTE entry) with the guest's MFN. As such we should
not try to do anything with the PTE. Previous to this patch
we would try to clear the PTE which resulted in Xen hypervisor
being upset with us:

(XEN) mm.c:1066:d0 Attempt to implicitly unmap a granted PTE c0100000ccc59067
(XEN) domain_crash called from mm.c:1067
(XEN) Domain 0 (vcpu#0) crashed on cpu#3:
(XEN) ----[ Xen-4.0-110228  x86_64  debug=y  Not tainted ]----

and crashing us.

This patch allows us to inhibit the PTE clearing in the PV guest
if the GNTMAP_contains_pte is not set.

On the m2p_remove_override path we provide the same parameter.

Sadly in the grant-table driver we do not have a mechanism to
tell m2p_remove_override whether to clear the PTE or not. Since
the grant-table driver is used by user-space, we can safely assume
that it operates only on PTEs. Hence the implementation for
it to work on !GNTMAP_contains_pte returns -EOPNOTSUPP. In the future
we can implement the support for this. It will require some extra
accounting structure to keep track of the page[i], and the flag.

[v1: Added documentation details, made it return -EOPNOTSUPP instead
 of trying to do a half-way implementation]
Signed-off-by: default avatarKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent edf6ef59
...@@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); ...@@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e); unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page); extern int m2p_add_override(unsigned long mfn, struct page *page,
extern int m2p_remove_override(struct page *page); bool clear_pte);
extern int m2p_remove_override(struct page *page, bool clear_pte);
extern struct page *m2p_find_override(unsigned long mfn); extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
......
...@@ -650,7 +650,7 @@ static unsigned long mfn_hash(unsigned long mfn) ...@@ -650,7 +650,7 @@ static unsigned long mfn_hash(unsigned long mfn)
} }
/* Add an MFN override for a particular page */ /* Add an MFN override for a particular page */
int m2p_add_override(unsigned long mfn, struct page *page) int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
{ {
unsigned long flags; unsigned long flags;
unsigned long pfn; unsigned long pfn;
...@@ -662,7 +662,6 @@ int m2p_add_override(unsigned long mfn, struct page *page) ...@@ -662,7 +662,6 @@ int m2p_add_override(unsigned long mfn, struct page *page)
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT); address = (unsigned long)__va(pfn << PAGE_SHIFT);
ptep = lookup_address(address, &level); ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K, if (WARN(ptep == NULL || level != PG_LEVEL_4K,
"m2p_add_override: pfn %lx not mapped", pfn)) "m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL; return -EINVAL;
...@@ -674,10 +673,9 @@ int m2p_add_override(unsigned long mfn, struct page *page) ...@@ -674,10 +673,9 @@ int m2p_add_override(unsigned long mfn, struct page *page)
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM; return -ENOMEM;
if (!PageHighMem(page)) if (clear_pte && !PageHighMem(page))
/* Just zap old mapping for now */ /* Just zap old mapping for now */
pte_clear(&init_mm, address, ptep); pte_clear(&init_mm, address, ptep);
spin_lock_irqsave(&m2p_override_lock, flags); spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags); spin_unlock_irqrestore(&m2p_override_lock, flags);
...@@ -685,7 +683,7 @@ int m2p_add_override(unsigned long mfn, struct page *page) ...@@ -685,7 +683,7 @@ int m2p_add_override(unsigned long mfn, struct page *page)
return 0; return 0;
} }
int m2p_remove_override(struct page *page) int m2p_remove_override(struct page *page, bool clear_pte)
{ {
unsigned long flags; unsigned long flags;
unsigned long mfn; unsigned long mfn;
...@@ -713,7 +711,7 @@ int m2p_remove_override(struct page *page) ...@@ -713,7 +711,7 @@ int m2p_remove_override(struct page *page)
spin_unlock_irqrestore(&m2p_override_lock, flags); spin_unlock_irqrestore(&m2p_override_lock, flags);
set_phys_to_machine(pfn, page->index); set_phys_to_machine(pfn, page->index);
if (!PageHighMem(page)) if (clear_pte && !PageHighMem(page))
set_pte_at(&init_mm, address, ptep, set_pte_at(&init_mm, address, ptep,
pfn_pte(pfn, PAGE_KERNEL)); pfn_pte(pfn, PAGE_KERNEL));
/* No tlb flush necessary because the caller already /* No tlb flush necessary because the caller already
......
...@@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ...@@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (map_ops[i].status) if (map_ops[i].status)
continue; continue;
/* m2p override only supported for GNTMAP_contains_pte mappings */ if (map_ops[i].flags & GNTMAP_contains_pte) {
if (!(map_ops[i].flags & GNTMAP_contains_pte)) pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
continue;
pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
(map_ops[i].host_addr & ~PAGE_MASK)); (map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte); mfn = pte_mfn(*pte);
ret = m2p_add_override(mfn, pages[i]); } else {
/* If you really wanted to do this:
* mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
*
* The reason we do not implement it is b/c on the
* unmap path (gnttab_unmap_refs) we have no means of
* checking whether the page is !GNTMAP_contains_pte.
*
* That is without some extra data-structure to carry
* the struct page, bool clear_pte, and list_head next
* tuples and deal with allocation/deallocation, etc.
*
* The users of this API set the GNTMAP_contains_pte
* flag, so let's just return not supported until it
* becomes necessary to implement.
*/
return -EOPNOTSUPP;
}
ret = m2p_add_override(mfn, pages[i],
map_ops[i].flags & GNTMAP_contains_pte);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, ...@@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret; return ret;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i]); ret = m2p_remove_override(pages[i], true /* clear the PTE */);
if (ret) if (ret)
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment