Commit c5a58f87 authored by Linus Torvalds

Merge tag 'for-linus-5.12b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Two security issues (XSA-367 and XSA-369)"

* tag 'for-linus-5.12b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: fix p2m size in dom0 for disabled memory hotplug case
  xen-netback: respect gnttab_map_refs()'s return value
  Xen/gnttab: handle p2m update errors on a per-slot basis
parents 43df5242 88221399
...@@ -93,12 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -93,12 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i; int i;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct gnttab_unmap_grant_ref unmap;
int rc;
if (map_ops[i].status) if (map_ops[i].status)
continue; continue;
if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) { map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
return -ENOMEM; continue;
}
/*
* Signal an error for this slot. This in turn requires
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
unmap.host_addr = map_ops[i].host_addr,
unmap.handle = map_ops[i].handle;
map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map)
unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
else
unmap.dev_bus_addr = 0;
/*
* Pre-populate the status field, to be recognizable in
* the log message below.
*/
unmap.status = 1;
rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
&unmap, 1);
if (rc || unmap.status != GNTST_okay)
pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
rc, unmap.status);
} }
return 0; return 0;
......
...@@ -86,6 +86,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, ...@@ -86,6 +86,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
} }
#endif #endif
/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define XEN_EXTRA_MEM_RATIO (10)
/* /*
* Helper functions to write or read unsigned long values to/from * Helper functions to write or read unsigned long values to/from
* memory, when the access may fault. * memory, when the access may fault.
......
...@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void) ...@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_last_pfn = xen_max_p2m_pfn; xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
vm.flags = VM_ALLOC; vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE); PMD_SIZE * PMDS_PER_MID_PAGE);
...@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) ...@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pte_t *ptep; pte_t *ptep;
unsigned int level; unsigned int level;
if (unlikely(pfn >= xen_p2m_size)) { /* Only invalid entries allowed above the highest p2m covered frame. */
BUG_ON(mfn != INVALID_P2M_ENTRY); if (unlikely(pfn >= xen_p2m_size))
return true; return mfn == INVALID_P2M_ENTRY;
}
/* /*
* The interface requires atomic updates on p2m elements. * The interface requires atomic updates on p2m elements.
...@@ -710,6 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -710,6 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
unsigned long mfn, pfn; unsigned long mfn, pfn;
struct gnttab_unmap_grant_ref unmap[2];
int rc;
/* Do not add to override if the map failed. */ /* Do not add to override if the map failed. */
if (map_ops[i].status != GNTST_okay || if (map_ops[i].status != GNTST_okay ||
...@@ -727,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -727,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
ret = -ENOMEM; continue;
goto out;
/*
* Signal an error for this slot. This in turn requires
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
unmap[0].host_addr = map_ops[i].host_addr,
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map)
unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
else
unmap[0].dev_bus_addr = 0;
if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
unmap[1].host_addr = kmap_ops[i].host_addr,
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = ~0;
if (kmap_ops[i].flags & GNTMAP_device_map)
unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
else
unmap[1].dev_bus_addr = 0;
} }
/*
* Pre-populate both status fields, to be recognizable in
* the log message below.
*/
unmap[0].status = 1;
unmap[1].status = 1;
rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
unmap, 1 + !!kmap_ops);
if (rc || unmap[0].status != GNTST_okay ||
unmap[1].status != GNTST_okay)
pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
rc, unmap[0].status, unmap[1].status);
} }
out: out:
......
...@@ -59,18 +59,6 @@ static struct { ...@@ -59,18 +59,6 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE); } xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define EXTRA_MEM_RATIO (10)
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB); static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void) static void __init xen_parse_512gb(void)
...@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void) ...@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn; extra_pages += max_pages - max_pfn;
/* /*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base * factor the base size.
* size is the full initial memory allocation; on highmem it
* is limited to the max size of lowmem, so that it doesn't
* get completely filled.
* *
* Make sure we have no memory above max_pages, as this area * Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management. * isn't handled by the p2m management.
*
* In principle there could be a problem in lowmem systems if
* the initial memory is also very large with respect to
* lowmem, but we won't try to deal with that here.
*/ */
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn); extra_pages, max_pages - max_pfn);
i = 0; i = 0;
addr = xen_e820_table.entries[0].addr; addr = xen_e820_table.entries[0].addr;
......
...@@ -1343,11 +1343,21 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) ...@@ -1343,11 +1343,21 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
return 0; return 0;
gnttab_batch_copy(queue->tx_copy_ops, nr_cops); gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
if (nr_mops != 0) if (nr_mops != 0) {
ret = gnttab_map_refs(queue->tx_map_ops, ret = gnttab_map_refs(queue->tx_map_ops,
NULL, NULL,
queue->pages_to_map, queue->pages_to_map,
nr_mops); nr_mops);
if (ret) {
unsigned int i;
netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
nr_mops, ret);
for (i = 0; i < nr_mops; ++i)
WARN_ON_ONCE(queue->tx_map_ops[i].status ==
GNTST_okay);
}
}
work_done = xenvif_tx_submit(queue); work_done = xenvif_tx_submit(queue);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment