Commit 292974c5 authored by Linus Torvalds

Merge tag 'for-linus-4.20a-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:

 - A revert of a previous commit, as it is no longer necessary and has
   been shown to cause problems in some memory hotplug cases.

 - Some small fixes and a minor cleanup.

 - A patch for adding better diagnostic data in a very rare failure
   case.

* tag 'for-linus-4.20a-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  pvcalls-front: fixes incorrect error handling
  Revert "xen/balloon: Mark unallocated host memory as UNUSABLE"
  xen: xlate_mmu: add missing header to fix 'W=1' warning
  xen/x86: add diagnostic printout to xen_mc_flush() in case of error
  x86/xen: cleanup includes in arch/x86/xen/spinlock.c
parents a234c737 975ef94a
arch/x86/xen/enlighten.c
@@ -10,7 +10,6 @@
 #include <xen/xen.h>
 #include <xen/features.h>
 #include <xen/page.h>
-#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
-{
-        struct xen_memory_map memmap;
-        int rc;
-        unsigned int i, last_guest_ram;
-        phys_addr_t max_addr = PFN_PHYS(max_pfn);
-        struct e820_table *xen_e820_table;
-        const struct e820_entry *entry;
-        struct resource *res;
-
-        if (!xen_initial_domain())
-                return;
-
-        xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
-        if (!xen_e820_table)
-                return;
-
-        memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
-        set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
-        rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
-        if (rc) {
-                pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
-                goto out;
-        }
-
-        last_guest_ram = 0;
-        for (i = 0; i < memmap.nr_entries; i++) {
-                if (xen_e820_table->entries[i].addr >= max_addr)
-                        break;
-                if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
-                        last_guest_ram = i;
-        }
-
-        entry = &xen_e820_table->entries[last_guest_ram];
-        if (max_addr >= entry->addr + entry->size)
-                goto out; /* No unallocated host RAM. */
-
-        hostmem_resource->start = max_addr;
-        hostmem_resource->end = entry->addr + entry->size;
-
-        /*
-         * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
-         * as unavailable. The rest of that region can be used for hotplug-based
-         * ballooning.
-         */
-        for (; i < memmap.nr_entries; i++) {
-                entry = &xen_e820_table->entries[i];
-
-                if (entry->type == E820_TYPE_RAM)
-                        continue;
-
-                if (entry->addr >= hostmem_resource->end)
-                        break;
-
-                res = kzalloc(sizeof(*res), GFP_KERNEL);
-                if (!res)
-                        goto out;
-
-                res->name = "Unavailable host RAM";
-                res->start = entry->addr;
-                res->end = (entry->addr + entry->size < hostmem_resource->end) ?
-                           entry->addr + entry->size : hostmem_resource->end;
-                rc = insert_resource(hostmem_resource, res);
-                if (rc) {
-                        pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
-                                __func__, res->start, res->end, rc);
-                        kfree(res);
-                        goto out;
-                }
-        }
-
-out:
-        kfree(xen_e820_table);
-}
-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
arch/x86/xen/multicalls.c
@@ -69,6 +69,11 @@ void xen_mc_flush(void)
 
         trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
 
+#if MC_DEBUG
+        memcpy(b->debug, b->entries,
+               b->mcidx * sizeof(struct multicall_entry));
+#endif
+
         switch (b->mcidx) {
         case 0:
                 /* no-op */
@@ -87,32 +92,34 @@ void xen_mc_flush(void)
                 break;
 
         default:
-#if MC_DEBUG
-                memcpy(b->debug, b->entries,
-                       b->mcidx * sizeof(struct multicall_entry));
-#endif
-
                 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                         BUG();
                 for (i = 0; i < b->mcidx; i++)
                         if (b->entries[i].result < 0)
                                 ret++;
+        }
 
-#if MC_DEBUG
-                if (ret) {
-                        printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
-                               ret, smp_processor_id());
-                        dump_stack();
-                        for (i = 0; i < b->mcidx; i++) {
-                                printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
-                                       i+1, b->mcidx,
-                                       b->debug[i].op,
-                                       b->debug[i].args[0],
-                                       b->entries[i].result,
-                                       b->caller[i]);
-                        }
-                }
-#endif
-        }
+        if (WARN_ON(ret)) {
+                pr_err("%d of %d multicall(s) failed: cpu %d\n",
+                       ret, b->mcidx, smp_processor_id());
+                for (i = 0; i < b->mcidx; i++) {
+                        if (b->entries[i].result < 0) {
+#if MC_DEBUG
+                                pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
+                                       i + 1,
+                                       b->debug[i].op,
+                                       b->debug[i].args[0],
+                                       b->entries[i].result,
+                                       b->caller[i]);
+#else
+                                pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\n",
+                                       i + 1,
+                                       b->entries[i].op,
+                                       b->entries[i].args[0],
+                                       b->entries[i].result);
+#endif
+                        }
+                }
+        }
 
         b->mcidx = 0;
@@ -126,8 +133,6 @@ void xen_mc_flush(void)
         b->cbidx = 0;
 
         local_irq_restore(flags);
-
-        WARN_ON(ret);
 }
 
 struct multicall_space __xen_mc_entry(size_t args)
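Note on the xen_mc_flush() change above: a failed batch is now reported per entry, and both the summary and the per-call lines are emitted whether or not MC_DEBUG is compiled in. With MC_DEBUG disabled, the new format strings would produce output roughly like the following; the values are invented purely for illustration:

    2 of 7 multicall(s) failed: cpu 1
      call  2: op=14 arg=[ffff88003f401000] result=-22
      call  5: op=26 arg=[2] result=-1

plus the usual backtrace from the WARN_ON().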
arch/x86/xen/setup.c
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
         addr = xen_e820_table.entries[0].addr;
         size = xen_e820_table.entries[0].size;
         while (i < xen_e820_table.nr_entries) {
+                bool discard = false;
 
                 chunk_size = size;
                 type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
                                 xen_add_extra_mem(pfn_s, n_pfns);
                                 xen_max_p2m_pfn = pfn_s + n_pfns;
                         } else
-                                type = E820_TYPE_UNUSABLE;
+                                discard = true;
                 }
 
-                xen_align_and_add_e820_region(addr, chunk_size, type);
+                if (!discard)
+                        xen_align_and_add_e820_region(addr, chunk_size, type);
 
                 addr += chunk_size;
                 size -= chunk_size;
arch/x86/xen/spinlock.c
@@ -3,22 +3,17 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
-#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/log2.h>
-#include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
 
-#include <xen/interface/xen.h>
 #include <xen/events.h>
 
 #include "xen-ops.h"
-#include "debugfs.h"
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
         kfree(resource);
 }
 
-/*
- * Host memory not allocated to dom0. We can use this range for hotplug-based
- * ballooning.
- *
- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
- * management algorithms (arch_remove_reservations()) look into guest e820,
- * which we don't want.
- */
-static struct resource hostmem_resource = {
-        .name = "Host RAM",
-};
-
-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
-{}
-
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-        struct resource *res, *res_hostmem;
-        int ret = -ENOMEM;
+        struct resource *res;
+        int ret;
 
         res = kzalloc(sizeof(*res), GFP_KERNEL);
         if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
         res->name = "System RAM";
         res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-        res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
-        if (res_hostmem) {
-                /* Try to grab a range from hostmem */
-                res_hostmem->name = "Host memory";
-                ret = allocate_resource(&hostmem_resource, res_hostmem,
-                                        size, 0, -1,
-                                        PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-        }
-
-        if (!ret) {
-                /*
-                 * Insert this resource into iomem. Because hostmem_resource
-                 * tracks portion of guest e820 marked as UNUSABLE noone else
-                 * should try to use it.
-                 */
-                res->start = res_hostmem->start;
-                res->end = res_hostmem->end;
-                ret = insert_resource(&iomem_resource, res);
-                if (ret < 0) {
-                        pr_err("Can't insert iomem_resource [%llx - %llx]\n",
-                                res->start, res->end);
-                        release_memory_resource(res_hostmem);
-                        res_hostmem = NULL;
-                        res->start = res->end = 0;
-                }
-        }
-
-        if (ret) {
-                ret = allocate_resource(&iomem_resource, res,
-                                        size, 0, -1,
-                                        PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-                if (ret < 0) {
-                        pr_err("Cannot allocate new System RAM resource\n");
-                        kfree(res);
-                        return NULL;
-                }
+        ret = allocate_resource(&iomem_resource, res,
+                                size, 0, -1,
+                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+        if (ret < 0) {
+                pr_err("Cannot allocate new System RAM resource\n");
+                kfree(res);
+                return NULL;
         }
 
 #ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                         pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
                                pfn, limit);
                         release_memory_resource(res);
-                        release_memory_resource(res_hostmem);
                         return NULL;
                 }
         }
@@ -750,8 +705,6 @@ static int __init balloon_init(void)
         set_online_page_callback(&xen_online_page);
         register_memory_notifier(&xen_memory_nb);
         register_sysctl_table(xen_root);
-
-        arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 
 out_error:
         if (*evtchn >= 0)
                 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-        kfree(map->active.data.in);
-        kfree(map->active.ring);
+        free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+        free_page((unsigned long)map->active.ring);
         return ret;
 }
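Note on the pvcalls-front fix above: the two buffers freed on this error path are page allocations (create_active() obtains them via __get_free_page() and __get_free_pages()), so they must be returned with free_page()/free_pages() using the matching order, not kfree(). A minimal sketch of the required pairing; the function and macro names are illustrative, only the allocator/free calls are real kernel API:

    #include <linux/gfp.h>

    #define EXAMPLE_ORDER 2                 /* stands in for PVCALLS_RING_ORDER */

    static int example_setup(void)
    {
            unsigned long ring, data;

            ring = __get_free_page(GFP_KERNEL | __GFP_ZERO);        /* single page */
            data = __get_free_pages(GFP_KERNEL | __GFP_ZERO, EXAMPLE_ORDER);
            if (!ring || !data)
                    goto err;
            /* ... hand the buffers over to the ring setup ... */
            return 0;

    err:
            /* release with the same granularity they were allocated with;
             * both helpers tolerate a zero address, and kfree() would be wrong here */
            free_pages(data, EXAMPLE_ORDER);
            free_page(ring);
            return -ENOMEM;
    }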
drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
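Note on the xlate_mmu change above: the 'W=1' warning being silenced is most likely gcc's -Wmissing-prototypes, i.e. a function with external linkage is defined in xlate_mmu.c without a prototype in scope because the header declaring it, <xen/xen-ops.h>, was not included. A generic sketch of that pattern; the file and function names are hypothetical, not the ones in xlate_mmu.c:

    /* foo.h: declares the externally visible function */
    int foo_count_ready(int nr);

    /*
     * foo.c: without '#include "foo.h"' a W=1 build warns
     * "no previous prototype for 'foo_count_ready' [-Wmissing-prototypes]";
     * including the header that carries the declaration resolves it.
     */
    #include "foo.h"

    int foo_count_ready(int nr)
    {
            return nr > 0 ? nr : 0;
    }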
include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif