Commit 613d4cef authored by Linus Torvalds

Merge tag 'stable/for-linus-3.19-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:
 "Several critical linear p2m fixes that prevented some hosts from
  booting"

* tag 'stable/for-linus-3.19-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  x86/xen: properly retrieve NMI reason
  xen: check for zero sized area when invalidating memory
  xen: use correct type for physical addresses
  xen: correct race in alloc_p2m_pmd()
  xen: correct error for building p2m list on 32 bits
  x86/xen: avoid freeing static 'name' when kasprintf() fails
  x86/xen: add extra memory for remapped frames during setup
  x86/xen: don't count how many PFNs are identity mapped
  x86/xen: Free bootmem in free_p2m_page() during early boot
  x86/xen: Remove unnecessary BUG_ON(preemptible()) in xen_setup_timer()
parents 4f7a42de f221b04f
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
         .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+        unsigned char reason = 0;
+
+        /* Construct a value which looks like it came from port 0x61. */
+        if (test_bit(_XEN_NMIREASON_io_error,
+                     &HYPERVISOR_shared_info->arch.nmi_reason))
+                reason |= NMI_REASON_IOCHK;
+        if (test_bit(_XEN_NMIREASON_pci_serr,
+                     &HYPERVISOR_shared_info->arch.nmi_reason))
+                reason |= NMI_REASON_SERR;
+
+        return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
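For comparison, the bare-metal hook that xen_get_nmi_reason() stands in for simply reads ISA port 0x61. A minimal sketch of that default path, written from memory of arch/x86/include/asm/mach_traps.h, so the exact names and values here are assumptions rather than part of this diff:

    /* Assumed names: native default consulted via x86_platform.get_nmi_reason. */
    #define NMI_REASON_PORT   0x61
    #define NMI_REASON_SERR   0x80    /* bit 7 of port 0x61: PCI SERR */
    #define NMI_REASON_IOCHK  0x40    /* bit 6 of port 0x61: I/O check error */

    static inline unsigned char default_get_nmi_reason(void)
    {
            return inb(NMI_REASON_PORT);  /* a PV guest has no such emulated port */
    }

Since a PV guest cannot read that port, Xen publishes the equivalent bits in arch_shared_info.nmi_reason, and the hook above translates them back into the port-0x61 encoding the generic NMI code expects.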
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
         pv_info = xen_info;
         pv_init_ops = xen_init_ops;
         pv_apic_ops = xen_apic_ops;
-        if (!xen_pvh_domain())
+        if (!xen_pvh_domain()) {
                 pv_cpu_ops = xen_cpu_ops;
+
+                x86_platform.get_nmi_reason = xen_get_nmi_reason;
+        }
+
         if (xen_feature(XENFEAT_auto_translated_physmap))
                 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
         else
...
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
         return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-        BUG_ON(!slab_is_available());
+        if (unlikely(!slab_is_available())) {
+                free_bootmem((unsigned long)p, PAGE_SIZE);
+                return;
+        }
+
         free_page((unsigned long)p);
 }
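This makes free_p2m_page() symmetric with its allocation side: alloc_p2m_page(), visible here only as the hunk's context line, already falls back to the boot allocator before the slab is up, so a page handed out that early must also be returned to bootmem rather than to free_page(). A rough sketch of the pairing, where the early-allocator call is my assumption and not something shown in this hunk:

    /* Assumed shape of the allocation-side counterpart (not part of this hunk). */
    static void * __ref alloc_p2m_page(void)
    {
            if (unlikely(!slab_is_available()))
                    return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);  /* early boot */

            return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);  /* after slab init */
    }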
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                                 p2m_missing_pte : p2m_identity_pte;
                 for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                         pmdp = populate_extra_pmd(
-                                (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                                (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                         set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                 }
         }
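The fix matters because p2m is an unsigned long pointer: adding i * PTRS_PER_PTE to it advances the virtual address by only i * PTRS_PER_PTE * sizeof(unsigned long) bytes, while each pmd slot filled by this loop must lie PMD_SIZE apart. A worked comparison, where the 32-bit figures are the interesting ones (on 64-bit the loop presumably runs only a single iteration, which is why the error stayed hidden there):

    old: (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)
         -> base + i * 512 * 4 bytes  =  base + i * 2 KiB   (32-bit PAE)
    new: (unsigned long)(p2m + pfn) + i * PMD_SIZE
         -> base + i * 2 MiB, one pmd's worth of address space per step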
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
         pte_t *ptechk;
-        pte_t *pteret = ptep;
         pte_t *pte_newpg[PMDS_PER_MID_PAGE];
         pmd_t *pmdp;
         unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
                 if (ptechk == pte_pg) {
                         set_pmd(pmdp,
                                 __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-                        if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                                pteret = pte_offset_kernel(pmdp, addr);
                         pte_newpg[i] = NULL;
                 }
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
                 vaddr += PMD_SIZE;
         }
 
-        return pteret;
+        return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
         if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                 /* PMD level is missing, allocate a new one */
-                ptep = alloc_p2m_pmd(addr, pte_pg);
+                ptep = alloc_p2m_pmd(addr, pte_pg);
                 if (!ptep)
                         return false;
         }
...
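The race fix in alloc_p2m_pmd() ("xen: correct race in alloc_p2m_pmd()") stops returning a pte pointer computed before the pmd was installed, since a concurrent caller may win the race to set that pmd; re-walking the final page tables with lookup_address() always yields the live pte. For reference, a minimal sketch of how that x86 helper is typically used (the PG_LEVEL_4K check is my illustration, not part of the patch):

    unsigned int level;
    pte_t *ptep;

    ptep = lookup_address(addr, &level);    /* walk the live kernel page tables */
    if (ptep && level == PG_LEVEL_4K) {
            /* addr is covered by an ordinary 4 KiB pte, which is what the
             * p2m code expects once the pmd for this range is in place. */
    }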
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
         int i;
-        unsigned long addr = PFN_PHYS(pfn);
+        phys_addr_t addr = PFN_PHYS(pfn);
 
         for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                 if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
         int i;
 
         for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+                if (!xen_extra_mem[i].size)
+                        continue;
                 pfn_s = PFN_DOWN(xen_extra_mem[i].start);
                 pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
                 for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-        unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-        unsigned long *released)
+        unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-        unsigned long len = 0;
         unsigned long pfn, end;
         int ret;
 
         WARN_ON(start_pfn > end_pfn);
 
+        /* Release pages first. */
         end = min(end_pfn, nr_pages);
         for (pfn = start_pfn; pfn < end; pfn++) {
                 unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                 WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                 if (ret == 1) {
+                        (*released)++;
                         if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                 break;
-                        len++;
                 } else
                         break;
         }
 
-        /* Need to release pages first */
-        *released += len;
-        *identity += set_phys_range_identity(start_pfn, end_pfn);
+        set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
         }
 
         /* Update kernel mapping, but not for highmem. */
-        if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+        if (pfn >= PFN_UP(__pa(high_memory - 1)))
                 return;
 
         if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
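Comparing frame numbers instead of shifted addresses sidesteps an overflow in the old test on 32-bit, where pfn << PAGE_SHIFT is evaluated in a 32-bit unsigned long. My reading of the motivation, with the arithmetic spelled out:

    pfn = 0x100000                       /* first frame at the 4 GiB boundary */
    pfn << PAGE_SHIFT = 0x100000000      /* needs 33 bits */
    as 32-bit unsigned long -> 0x0       /* truncated */

so the old ">= __pa(high_memory)" check failed to trigger and the code went on to touch a kernel mapping for a pfn that is not lowmem at all. The new form, pfn >= PFN_UP(__pa(high_memory - 1)), compares page frame numbers and never shifts, so it cannot overflow.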
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
         unsigned long ident_pfn_iter, remap_pfn_iter;
         unsigned long ident_end_pfn = start_pfn + size;
         unsigned long left = size;
-        unsigned long ident_cnt = 0;
         unsigned int i, chunk;
 
         WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
                 xen_remap_mfn = mfn;
 
                 /* Set identity map */
-                ident_cnt += set_phys_range_identity(ident_pfn_iter,
-                        ident_pfn_iter + chunk);
+                set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
                 left -= chunk;
         }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
         unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-        unsigned long *identity, unsigned long *released)
+        unsigned long *released, unsigned long *remapped)
 {
         unsigned long pfn;
         unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                 /* Do not remap pages beyond the current allocation */
                 if (cur_pfn >= nr_pages) {
                         /* Identity map remaining pages */
-                        *identity += set_phys_range_identity(cur_pfn,
-                                cur_pfn + size);
+                        set_phys_range_identity(cur_pfn, cur_pfn + size);
                         break;
                 }
                 if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                 if (!remap_range_size) {
                         pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                         xen_set_identity_and_release_chunk(cur_pfn,
-                                cur_pfn + left, nr_pages, identity, released);
+                                cur_pfn + left, nr_pages, released);
                         break;
                 }
                 /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                 /* Update variables to reflect new mappings. */
                 i += size;
                 remap_pfn += size;
-                *identity += size;
+                *remapped += size;
         }
 
         /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 static void __init xen_set_identity_and_remap(
         const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-        unsigned long *released)
+        unsigned long *released, unsigned long *remapped)
 {
         phys_addr_t start = 0;
-        unsigned long identity = 0;
         unsigned long last_pfn = nr_pages;
         const struct e820entry *entry;
         unsigned long num_released = 0;
+        unsigned long num_remapped = 0;
         int i;
 
         /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
                         last_pfn = xen_set_identity_and_remap_chunk(
                                                 list, map_size, start_pfn,
                                                 end_pfn, nr_pages, last_pfn,
-                                                &identity, &num_released);
+                                                &num_released, &num_remapped);
                         start = end;
                 }
         }
 
         *released = num_released;
+        *remapped = num_remapped;
 
-        pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
         pr_info("Released %ld page(s)\n", num_released);
 }
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
         struct xen_memory_map memmap;
         unsigned long max_pages;
         unsigned long extra_pages = 0;
+        unsigned long remapped_pages;
         int i;
         int op;
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
          * underlying RAM.
          */
         xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                   &xen_released_pages);
+                                   &xen_released_pages, &remapped_pages);
         extra_pages += xen_released_pages;
+        extra_pages += remapped_pages;
 
         /*
          * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
...
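Taken together, the setup changes make xen_memory_setup() count remapped frames as well as released ones when sizing the extra-memory regions ("x86/xen: add extra memory for remapped frames during setup"). A small worked example of that accounting, with page counts that are purely illustrative; the EXTRA_MEM_RATIO clamp mentioned in the trailing context still applies afterwards:

    xen_released_pages = 1024    /* handed back to Xen               */
    remapped_pages     = 4096    /* moved above the identity regions */
    extra_pages       += 1024 + 4096 = 5120 pages of candidate extra memory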
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
 struct xen_clock_event_device {
         struct clock_event_device evt;
-        char *name;
+        char name[16];
 };
 
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
         if (evt->irq >= 0) {
                 unbind_from_irqhandler(evt->irq, NULL);
                 evt->irq = -1;
-                kfree(per_cpu(xen_clock_events, cpu).name);
-                per_cpu(xen_clock_events, cpu).name = NULL;
         }
 }
 
 void xen_setup_timer(int cpu)
 {
-        char *name;
-        struct clock_event_device *evt;
+        struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+        struct clock_event_device *evt = &xevt->evt;
         int irq;
 
-        evt = &per_cpu(xen_clock_events, cpu).evt;
-
         WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
         if (evt->irq >= 0)
                 xen_teardown_timer(cpu);
 
         printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-        name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-        if (!name)
-                name = "<timer kasprintf failed>";
+        snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
         irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                       IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                       IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                      name, NULL);
+                                      xevt->name, NULL);
         (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
         memcpy(evt, xen_clockevent, sizeof(*evt));
 
         evt->cpumask = cpumask_of(cpu);
         evt->irq = irq;
-        per_cpu(xen_clock_events, cpu).name = name;
 }
 
 void xen_setup_cpu_clockevents(void)
 {
-        BUG_ON(preemptible());
-
         clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
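Embedding name[16] in the per-CPU structure removes both the kasprintf() failure handling and the matching kfree() in teardown ("x86/xen: avoid freeing static 'name' when kasprintf() fails"). The buffer size works out because the worst case fits exactly; a quick check, assuming cpu is a non-negative int:

    "timer"           5 bytes
    cpu as %d         up to 10 digits (INT_MAX = 2147483647)
    terminating NUL   1 byte
    total            16 bytes, and snprintf() truncates rather than overflows
                      even if that bound were ever exceeded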
...
/******************************************************************************
 * nmi.h
 *
 * NMI callback registration and reason codes.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_NMI_H__
#define __XEN_PUBLIC_NMI_H__

#include <xen/interface/xen.h>

/*
 * NMI reason codes:
 * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
 */
/* I/O-check error reported via ISA port 0x61, bit 6. */
#define _XEN_NMIREASON_io_error     0
#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
/* PCI SERR reported via ISA port 0x61, bit 7. */
#define _XEN_NMIREASON_pci_serr     1
#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
/* Unknown hardware-generated NMI. */
#define _XEN_NMIREASON_unknown      2
#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)

/*
 * long nmi_op(unsigned int cmd, void *arg)
 * NB. All ops return zero on success, else a negative error code.
 */

/*
 * Register NMI callback for this (calling) VCPU. Currently this only makes
 * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
 * arg == pointer to xennmi_callback structure.
 */
#define XENNMI_register_callback    0
struct xennmi_callback {
        unsigned long handler_address;
        unsigned long pad;
};
DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);

/*
 * Deregister NMI callback for this (calling) VCPU.
 * arg == NULL.
 */
#define XENNMI_unregister_callback  1

#endif /* __XEN_PUBLIC_NMI_H__ */
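The header defines each reason both as a bit number (_XEN_NMIREASON_*, used with test_bit() in the enlighten.c hunk above) and as a ready-made mask (XEN_NMIREASON_*). A small sketch of the mask form; decode_xen_nmi_reason() and its messages are my own invention for illustration, not anything added by this merge:

    #include <linux/printk.h>
    #include <xen/interface/nmi.h>

    /* Hypothetical helper: decode a raw nmi_reason word using the mask macros. */
    static void decode_xen_nmi_reason(unsigned long nmi_reason)
    {
            if (nmi_reason & XEN_NMIREASON_io_error)
                    pr_warn("xen: NMI - I/O check error\n");
            if (nmi_reason & XEN_NMIREASON_pci_serr)
                    pr_warn("xen: NMI - PCI SERR\n");
            if (nmi_reason & XEN_NMIREASON_unknown)
                    pr_warn("xen: NMI - unknown reason\n");
    }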