Commit 1cd731df authored by Linus Torvalds

Merge tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen fixes from Konrad Rzeszutek Wilk:
 "Bug-fixes:
   - Revert "xen/grant-table: Avoid m2p_override during mapping" as it
     broke Xen ARM build.
   - Fix CR4 not being set on AP processors in Xen PVH mode"

* tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pvh: set CR4 flags for APs
  Revert "xen/grant-table: Avoid m2p_override during mapping"
parents 251aa0fd afca5013
...@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, ...@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page, extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op); struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page, extern int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op, struct gnttab_map_grant_ref *kmap_op);
unsigned long mfn);
extern struct page *m2p_find_override(unsigned long mfn); extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
...@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) ...@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
pfn = m2p_find_override_pfn(mfn, ~0); pfn = m2p_find_override_pfn(mfn, ~0);
} }
/* /*
* pfn is ~0 if there are no entries in the m2p for mfn or if the * pfn is ~0 if there are no entries in the m2p for mfn or if the
* entry doesn't map back to the mfn and m2p_override doesn't have a * entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it. * valid entry for it.
......
...@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu) ...@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
* X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
* (which PVH shared codepaths), while X86_CR0_PG is for PVH. */ * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
if (!cpu)
return;
/*
* For BSP, PSE PGE are set in probe_page_size_mask(), for APs
* set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
*/
if (cpu_has_pse)
set_in_cr4(X86_CR4_PSE);
if (cpu_has_pge)
set_in_cr4(X86_CR4_PGE);
} }
/* /*
......
...@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page, ...@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
"m2p_add_override: pfn %lx not mapped", pfn)) "m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL; return -EINVAL;
} }
WARN_ON(PagePrivate(page));
SetPagePrivate(page);
set_page_private(page, mfn);
page->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM;
if (kmap_op != NULL) { if (kmap_op != NULL) {
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
...@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page, ...@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
} }
EXPORT_SYMBOL_GPL(m2p_add_override); EXPORT_SYMBOL_GPL(m2p_add_override);
int m2p_remove_override(struct page *page, int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op, struct gnttab_map_grant_ref *kmap_op)
unsigned long mfn)
{ {
unsigned long flags; unsigned long flags;
unsigned long mfn;
unsigned long pfn; unsigned long pfn;
unsigned long uninitialized_var(address); unsigned long uninitialized_var(address);
unsigned level; unsigned level;
pte_t *ptep = NULL; pte_t *ptep = NULL;
pfn = page_to_pfn(page); pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
return -EINVAL;
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT); address = (unsigned long)__va(pfn << PAGE_SHIFT);
...@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page, ...@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
spin_lock_irqsave(&m2p_override_lock, flags); spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru); list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags); spin_unlock_irqrestore(&m2p_override_lock, flags);
WARN_ON(!PagePrivate(page));
ClearPagePrivate(page);
set_phys_to_machine(pfn, page->index);
if (kmap_op != NULL) { if (kmap_op != NULL) {
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
struct multicall_space mcs; struct multicall_space mcs;
......
...@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, ...@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) { !rb_next(&persistent_gnt->node)) {
ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
...@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work) ...@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page; pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
...@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work) ...@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
kfree(persistent_gnt); kfree(persistent_gnt);
} }
if (segs_to_unmap > 0) { if (segs_to_unmap > 0) {
ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(blkif, pages, segs_to_unmap);
} }
...@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif, ...@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
GNTMAP_host_map, pages[i]->handle); GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE; pages[i]->handle = BLKBACK_INVALID_HANDLE;
if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
invcount);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount); put_free_pages(blkif, unmap_pages, invcount);
invcount = 0; invcount = 0;
} }
} }
if (invcount) { if (invcount) {
ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount); put_free_pages(blkif, unmap_pages, invcount);
} }
...@@ -737,7 +740,7 @@ static int xen_blkbk_map(struct xen_blkif *blkif, ...@@ -737,7 +740,7 @@ static int xen_blkbk_map(struct xen_blkif *blkif,
} }
if (segs_to_map) { if (segs_to_map) {
ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map); ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
BUG_ON(ret); BUG_ON(ret);
} }
......
...@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map) ...@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
} }
pr_debug("map %d+%d\n", map->index, map->count); pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs_userspace(map->map_ops, err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
use_ptemod ? map->kmap_ops : NULL, map->pages, map->count);
map->pages,
map->count);
if (err) if (err)
return err; return err;
...@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) ...@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
} }
} }
err = gnttab_unmap_refs_userspace(map->unmap_ops + offset, err = gnttab_unmap_refs(map->unmap_ops + offset,
use_ptemod ? map->kmap_ops + offset : NULL, use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
map->pages + offset, pages);
pages);
if (err) if (err)
return err; return err;
......
...@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count) ...@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
} }
EXPORT_SYMBOL_GPL(gnttab_batch_copy); EXPORT_SYMBOL_GPL(gnttab_batch_copy);
int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count, struct page **pages, unsigned int count)
bool m2p_override)
{ {
int i, ret; int i, ret;
bool lazy = false; bool lazy = false;
pte_t *pte; pte_t *pte;
unsigned long mfn, pfn; unsigned long mfn;
BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret) if (ret)
return ret; return ret;
...@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ...@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT); map_ops[i].dev_bus_addr >> PAGE_SHIFT);
} }
return 0; return ret;
} }
if (m2p_override && if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode(); arch_enter_lazy_mmu_mode();
lazy = true; lazy = true;
} }
...@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ...@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
} else { } else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr); mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
} }
pfn = page_to_pfn(pages[i]); ret = m2p_add_override(mfn, pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
WARN_ON(PagePrivate(pages[i]));
SetPagePrivate(pages[i]);
set_page_private(pages[i], mfn);
pages[i]->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM;
goto out;
}
if (m2p_override)
ret = m2p_add_override(mfn, pages[i], kmap_ops ?
&kmap_ops[i] : NULL);
if (ret) if (ret)
goto out; goto out;
} }
...@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ...@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret; return ret;
} }
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
return __gnttab_map_refs(map_ops, NULL, pages, count, false);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs); EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count, struct page **pages, unsigned int count)
bool m2p_override)
{ {
int i, ret; int i, ret;
bool lazy = false; bool lazy = false;
unsigned long pfn, mfn;
BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret) if (ret)
return ret; return ret;
...@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, ...@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY); INVALID_P2M_ENTRY);
} }
return 0; return ret;
} }
if (m2p_override && if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode(); arch_enter_lazy_mmu_mode();
lazy = true; lazy = true;
} }
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
pfn = page_to_pfn(pages[i]); ret = m2p_remove_override(pages[i], kmap_ops ?
mfn = get_phys_to_machine(pfn); &kmap_ops[i] : NULL);
if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
ret = -EINVAL;
goto out;
}
set_page_private(pages[i], INVALID_P2M_ENTRY);
WARN_ON(!PagePrivate(pages[i]));
ClearPagePrivate(pages[i]);
set_phys_to_machine(pfn, pages[i]->index);
if (m2p_override)
ret = m2p_remove_override(pages[i],
kmap_ops ?
&kmap_ops[i] : NULL,
mfn);
if (ret) if (ret)
goto out; goto out;
} }
...@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, ...@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret; return ret;
} }
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs); EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count)
{
return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
static unsigned nr_status_frames(unsigned nr_grant_frames) static unsigned nr_status_frames(unsigned nr_grant_frames)
{ {
BUG_ON(grefs_per_grant_frame == 0); BUG_ON(grefs_per_grant_frame == 0);
......
...@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void); ...@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kunmap_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
/* Perform a batch of grant map/copy operations. Retry every batch slot /* Perform a batch of grant map/copy operations. Retry every batch slot
* for which the hypervisor returns GNTST_eagain. This is typically due * for which the hypervisor returns GNTST_eagain. This is typically due
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment