Commit 520045db authored by Linus Torvalds

Merge branches 'upstream/xenfs' and 'upstream/core' of...

Merge branches 'upstream/xenfs' and 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen

* 'upstream/xenfs' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
  xen/privcmd: make privcmd visible in domU
  xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
  privcmd: MMAPBATCH: Fix error handling/reporting
  xenbus: export xen_store_interface for xenfs
  xen/privcmd: make sure vma is ours before doing anything to it
  xen/privcmd: print SIGBUS faults
  xen/xenfs: set_page_dirty is supposed to return true if it dirties
  xen/privcmd: create address space to allow writable mmaps
  xen: add privcmd driver
  xen: add variable hypercall caller
  xen: add xen_set_domain_pte()
  xen: add /proc/xen/xsd_{kva,port} to xenfs

* 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen: (29 commits)
  xen: include xen/xen.h for definition of xen_initial_domain()
  xen: use host E820 map for dom0
  xen: correctly rebuild mfn list list after migration.
  xen: improvements to VIRQ_DEBUG output
  xen: set up IRQ before binding virq to evtchn
  xen: ensure that all event channels start off bound to VCPU 0
  xen/hvc: only notify if we actually sent something
  xen: don't add extra_pages for RAM after mem_end
  xen: add support for PAT
  xen: make sure xen_max_p2m_pfn is up to date
  xen: limit extra memory to a certain ratio of base
  xen: add extra pages for E820 RAM regions, even if beyond mem_end
  xen: make sure xen_extra_mem_start is beyond all non-RAM e820
  xen: implement "extra" memory to reserve space for pages not present at boot
  xen: Use host-provided E820 map
  xen: don't map missing memory
  xen: defer building p2m mfn structures until kernel is mapped
  xen: add return value to set_phys_to_machine()
  xen: convert p2m to a 3 level tree
  xen: make install_p2mtop_page() static
  ...

Fix up trivial conflict in arch/x86/xen/mmu.c, and fix the use of
'reserve_early()' - in the new memblock world order it is now
'memblock_x86_reserve_range()' instead. Pointed out by Jeremy.
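The reserve_early() fixup mentioned above is a one-for-one rename; both the old call and its memblock replacement take (start, end, name) style arguments, so the conflict resolution is a simple substitution. A minimal sketch of the shape of that change, borrowing the arguments from the xen_add_extra_mem() hunk shown later in this diff (the exact conflicted call site is not reproduced here):

	/* old early-reservation API (pre-memblock) */
	reserve_early(extra_start, extra_start + size, "XEN EXTRA");

	/* memblock replacement used after this merge */
	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");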
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
 		(type)__res;	\
 	})

+static inline long
+privcmd_call(unsigned call,
+	     unsigned long a1, unsigned long a2,
+	     unsigned long a3, unsigned long a4,
+	     unsigned long a5)
+{
+	__HYPERCALL_DECLS;
+	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+	asm volatile("call *%[call]"
+		     : __HYPERCALL_5PARAM
+		     : [call] "a" (&hypercall_page[call])
+		     : __HYPERCALL_CLOBBER5);
+
+	return (long)__res;
+}
+
 static inline int
 HYPERVISOR_set_trap_table(struct trap_info *table)
 {
...
@@ -37,14 +37,21 @@ typedef struct xpaddr {
 extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);

 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
+	unsigned long mfn;
+
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return pfn;

-	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+	mfn = get_phys_to_machine(pfn);
+	if (mfn != INVALID_P2M_ENTRY)
+		mfn &= ~FOREIGN_FRAME_BIT;
+
+	return mfn;
 }

 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -159,6 +166,7 @@ static inline pte_t __pte_ma(pteval_t x)
 #define pgd_val_ma(x)	((x).pgd)

+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
...
@@ -19,15 +19,12 @@ config XEN_PVHVM
 	  depends on X86_LOCAL_APIC

 config XEN_MAX_DOMAIN_MEMORY
-	int "Maximum allowed size of a domain in gigabytes"
-	default 8 if X86_32
-	default 32 if X86_64
+	int
+	default 128
 	depends on XEN
 	help
-	  The pseudo-physical to machine address array is sized
-	  according to the maximum possible memory size of a Xen
-	  domain. This array uses 1 page per gigabyte, so there's no
-	  need to be too stingy here.
+	  This only affects the sizing of some bss arrays, the unused
+	  portions of which are freed.

 config XEN_SAVE_RESTORE
 	bool
...
@@ -136,9 +136,6 @@ static void xen_vcpu_setup(int cpu)
 	info.mfn = arbitrary_virt_to_mfn(vcpup);
 	info.offset = offset_in_page(vcpup);

-	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
-	       cpu, vcpup, info.mfn, info.offset);
-
 	/* Check to see if the hypervisor will put the vcpu_info
 	   structure where we want it, which allows direct access via
 	   a percpu-variable. */
@@ -152,9 +149,6 @@ static void xen_vcpu_setup(int cpu)
 		/* This cpu is using the registered vcpu info, even if
 		   later ones fail to. */
 		per_cpu(xen_vcpu, cpu) = vcpup;
-
-		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
-		       cpu, vcpup);
 	}
 }
@@ -836,6 +830,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 		   Xen console noise. */
 		break;

+	case MSR_IA32_CR_PAT:
+		if (smp_processor_id() == 0)
+			xen_set_pat(((u64)high << 32) | low);
+		break;
+
 	default:
 		ret = native_write_msr_safe(msr, low, high);
 	}
@@ -874,8 +873,6 @@ void xen_setup_vcpu_info_placement(void)
 	/* xen_vcpu_setup managed to place the vcpu_info within the
 	   percpu area for all cpus, so make use of it */
 	if (have_vcpu_info_placement) {
-		printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
 		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
 		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
 		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1189,6 +1186,9 @@ asmlinkage void __init xen_start_kernel(void)
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

+	/* Allocate and initialize top and mid mfn levels for p2m structure */
+	xen_build_mfn_list_list();
+
 	init_mm.pgd = pgd;

 	/* keep using Xen gdt for now; no urgent need to change it */
...
...
@@ -12,7 +12,6 @@ enum pt_level {
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p);

 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
...
@@ -18,8 +18,10 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>

+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/callback.h>
+#include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
@@ -34,6 +36,39 @@ extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);

+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO		(10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+	u64 size = (u64)pages * PAGE_SIZE;
+	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+	if (!pages)
+		return;
+
+	e820_add_region(extra_start, size, E820_RAM);
+	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+	xen_extra_mem_size += size;
+	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 					      phys_addr_t end_addr)
 {
@@ -105,16 +140,65 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
+	static struct e820entry map[E820MAX] __initdata;
+
 	unsigned long max_pfn = xen_start_info->nr_pages;
+	unsigned long long mem_end;
+	int rc;
+	struct xen_memory_map memmap;
+	unsigned long extra_pages = 0;
+	unsigned long extra_limit;
+	int i;
+	int op;

 	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+	mem_end = PFN_PHYS(max_pfn);
+
+	memmap.nr_entries = E820MAX;
+	set_xen_guest_handle(memmap.buffer, map);
+
+	op = xen_initial_domain() ?
+		XENMEM_machine_memory_map :
+		XENMEM_memory_map;
+	rc = HYPERVISOR_memory_op(op, &memmap);
+	if (rc == -ENOSYS) {
+		memmap.nr_entries = 1;
+		map[0].addr = 0ULL;
+		map[0].size = mem_end;
+		/* 8MB slack (to balance backend allocations). */
+		map[0].size += 8ULL << 20;
+		map[0].type = E820_RAM;
+		rc = 0;
+	}
+	BUG_ON(rc);

 	e820.nr_map = 0;

-	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+	xen_extra_mem_start = mem_end;
+	for (i = 0; i < memmap.nr_entries; i++) {
+		unsigned long long end = map[i].addr + map[i].size;
+
+		if (map[i].type == E820_RAM) {
+			if (map[i].addr < mem_end && end > mem_end) {
+				/* Truncate region to max_mem. */
+				u64 delta = end - mem_end;
+
+				map[i].size -= delta;
+				extra_pages += PFN_DOWN(delta);
+
+				end = mem_end;
+			}
+		}
+
+		if (end > xen_extra_mem_start)
+			xen_extra_mem_start = end;
+
+		/* If region is non-RAM or below mem_end, add what remains */
+		if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
+		    map[i].size > 0)
+			e820_add_region(map[i].addr, map[i].size, map[i].type);
+	}

 	/*
 	 * Even though this is normal, usable memory under Xen, reserve
@@ -136,7 +220,29 @@ char * __init xen_memory_setup(void)

 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

-	xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+	/*
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+	 * factor the base size.  On non-highmem systems, the base
+	 * size is the full initial memory allocation; on highmem it
+	 * is limited to the max size of lowmem, so that it doesn't
+	 * get completely filled.
+	 *
+	 * In principle there could be a problem in lowmem systems if
+	 * the initial memory is also very large with respect to
+	 * lowmem, but we won't try to deal with that here.
+	 */
+	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+			  max_pfn + extra_pages);
+
+	if (extra_limit >= max_pfn)
+		extra_pages = extra_limit - max_pfn;
+	else
+		extra_pages = 0;
+
+	if (!xen_initial_domain())
+		xen_add_extra_mem(extra_pages);

 	return "Xen";
 }
...
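For a rough sense of the EXTRA_MEM_RATIO clamp above, take a hypothetical domain started with a 1 GiB allocation (max_pfn = 262144 four-kilobyte pages) on a 64-bit build, where MAXMEM dwarfs the domain size, and suppose the host E820 offers another 3 GiB of RAM beyond mem_end (extra_pages roughly 786432). Then extra_limit = min(10 * 262144, 262144 + 786432) = 1048576, so extra_pages becomes 1048576 - 262144 = 786432 and the whole 3 GiB is kept as "extra" memory. Only if the map offered more than 9 GiB beyond the base allocation would the 10x ratio start truncating it.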
@@ -30,6 +30,9 @@ void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
+extern unsigned long xen_max_p2m_pfn;
+
+void xen_set_pat(u64);

 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
...
@@ -74,6 +74,7 @@ static int __write_console(const char *data, int len)
 	wmb();			/* write ring before updating pointer */
 	intf->out_prod = prod;

-	notify_daemon();
+	if (sent)
+		notify_daemon();
 	return sent;
 }
...
@@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
 	}
 #endif

-	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
 }

 static inline void clear_evtchn(int port)
@@ -377,7 +377,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 		irq = find_unbound_irq();

 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-					      handle_edge_irq, "event");
+					      handle_fasteoi_irq, "event");

 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_evtchn_info(evtchn);
@@ -435,6 +435,11 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	irq = per_cpu(virq_to_irq, cpu)[virq];

 	if (irq == -1) {
+		irq = find_unbound_irq();
+
+		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+					      handle_percpu_irq, "virq");
+
 		bind_virq.virq = virq;
 		bind_virq.vcpu = cpu;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
@@ -442,11 +447,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 			BUG();
 		evtchn = bind_virq.port;

-		irq = find_unbound_irq();
-
-		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-					      handle_percpu_irq, "virq");
-
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -578,41 +578,75 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int cpu = smp_processor_id();
+	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
 	int i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(debug_lock);
+	struct vcpu_info *v;

 	spin_lock_irqsave(&debug_lock, flags);

-	printk("vcpu %d\n ", cpu);
+	printk("\nvcpu %d\n ", cpu);

 	for_each_online_cpu(i) {
-		struct vcpu_info *v = per_cpu(xen_vcpu, i);
-		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
-		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
-		       v->evtchn_upcall_pending,
-		       v->evtchn_pending_sel);
+		int pending;
+		v = per_cpu(xen_vcpu, i);
+		pending = (get_irq_regs() && i == cpu)
+			? xen_irqs_disabled(get_irq_regs())
+			: v->evtchn_upcall_mask;
+		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+		       pending, v->evtchn_upcall_pending,
+		       (int)(sizeof(v->evtchn_pending_sel)*2),
+		       v->evtchn_pending_sel);
 	}
+	v = per_cpu(xen_vcpu, cpu);

-	printk("pending:\n ");
-	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-		printk("%08lx%s", sh->evtchn_pending[i],
-		       i % 8 == 0 ? "\n " : " ");
-	printk("\nmasks:\n ");
-	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%08lx%s", sh->evtchn_mask[i],
-		       i % 8 == 0 ? "\n " : " ");
-
-	printk("\nunmasked:\n ");
-	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
-		       i % 8 == 0 ? "\n " : " ");
+	printk("\npending:\n ");
+	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+		       sh->evtchn_pending[i],
+		       i % 8 == 0 ? "\n " : " ");
+	printk("\nglobal mask:\n ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%0*lx%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
+		       sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n " : " ");
+
+	printk("\nglobally unmasked:\n ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n " : " ");
+
+	printk("\nlocal cpu%d mask:\n ", cpu);
+	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
+		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+		       cpu_evtchn[i],
+		       i % 8 == 0 ? "\n " : " ");
+
+	printk("\nlocally unmasked:\n ");
+	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+		unsigned long pending = sh->evtchn_pending[i]
+			& ~sh->evtchn_mask[i]
+			& cpu_evtchn[i];
+		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		       pending, i % 8 == 0 ? "\n " : " ");
+	}

 	printk("\npending list:\n");
-	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
 		if (sync_test_bit(i, sh->evtchn_pending)) {
-			printk(" %d: event %d -> irq %d\n",
+			int word_idx = i / BITS_PER_LONG;
+			printk(" %d: event %d -> irq %d%s%s%s\n",
 			       cpu_from_evtchn(i), i,
-			       evtchn_to_irq[i]);
+			       evtchn_to_irq[i],
+			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
+				       ? "" : " l2-clear",
+			       !sync_test_bit(i, sh->evtchn_mask)
+				       ? "" : " globally-masked",
+			       sync_test_bit(i, cpu_evtchn)
+				       ? "" : " locally-masked");
 		}
 	}
@@ -663,6 +697,9 @@ static void __xen_evtchn_do_upcall(void)
 			int irq = evtchn_to_irq[port];
 			struct irq_desc *desc;

+			mask_evtchn(port);
+			clear_evtchn(port);
+
 			if (irq != -1) {
 				desc = irq_to_desc(irq);
 				if (desc)
@@ -800,10 +837,10 @@ static void ack_dynirq(unsigned int irq)
 {
 	int evtchn = evtchn_from_irq(irq);

-	move_native_irq(irq);
+	move_masked_irq(irq);

 	if (VALID_EVTCHN(evtchn))
-		clear_evtchn(evtchn);
+		unmask_evtchn(evtchn);
 }

 static int retrigger_dynirq(unsigned int irq)
@@ -959,7 +996,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.mask		= disable_dynirq,
 	.unmask		= enable_dynirq,

-	.ack		= ack_dynirq,
+	.eoi		= ack_dynirq,

 	.set_affinity	= set_affinity_irq,
 	.retrigger	= retrigger_dynirq,
 };
...
@@ -64,9 +64,11 @@
 int xen_store_evtchn;
-EXPORT_SYMBOL(xen_store_evtchn);
+EXPORT_SYMBOL_GPL(xen_store_evtchn);

 struct xenstore_domain_interface *xen_store_interface;
+EXPORT_SYMBOL_GPL(xen_store_interface);
+
 static unsigned long xen_store_mfn;

 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
...
 obj-$(CONFIG_XENFS) += xenfs.o

-xenfs-objs = super.o xenbus.o
\ No newline at end of file
+xenfs-y			  = super.o xenbus.o privcmd.o
+xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
/******************************************************************************
* privcmd.c
*
* Interface to privileged domain-0 commands.
*
* Copyright (c) 2002-2004, K A Fraser, B Dragovic
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
static long privcmd_ioctl_hypercall(void __user *udata)
{
struct privcmd_hypercall hypercall;
long ret;
if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
ret = privcmd_call(hypercall.op,
hypercall.arg[0], hypercall.arg[1],
hypercall.arg[2], hypercall.arg[3],
hypercall.arg[4]);
return ret;
}
static void free_page_list(struct list_head *pages)
{
struct page *p, *n;
list_for_each_entry_safe(p, n, pages, lru)
__free_page(p);
INIT_LIST_HEAD(pages);
}
/*
* Given an array of items in userspace, return a list of pages
* containing the data. If copying fails, either because of memory
* allocation failure or a problem reading user memory, return an
* error code; its up to the caller to dispose of any partial list.
*/
static int gather_array(struct list_head *pagelist,
unsigned nelem, size_t size,
void __user *data)
{
unsigned pageidx;
void *pagedata;
int ret;
if (size > PAGE_SIZE)
return 0;
pageidx = PAGE_SIZE;
pagedata = NULL; /* quiet, gcc */
while (nelem--) {
if (pageidx > PAGE_SIZE-size) {
struct page *page = alloc_page(GFP_KERNEL);
ret = -ENOMEM;
if (page == NULL)
goto fail;
pagedata = page_address(page);
list_add_tail(&page->lru, pagelist);
pageidx = 0;
}
ret = -EFAULT;
if (copy_from_user(pagedata + pageidx, data, size))
goto fail;
data += size;
pageidx += size;
}
ret = 0;
fail:
return ret;
}
/*
* Call function "fn" on each element of the array fragmented
* over a list of pages.
*/
static int traverse_pages(unsigned nelem, size_t size,
struct list_head *pos,
int (*fn)(void *data, void *state),
void *state)
{
void *pagedata;
unsigned pageidx;
int ret = 0;
BUG_ON(size > PAGE_SIZE);
pageidx = PAGE_SIZE;
pagedata = NULL; /* hush, gcc */
while (nelem--) {
if (pageidx > PAGE_SIZE-size) {
struct page *page;
pos = pos->next;
page = list_entry(pos, struct page, lru);
pagedata = page_address(page);
pageidx = 0;
}
ret = (*fn)(pagedata + pageidx, state);
if (ret)
break;
pageidx += size;
}
return ret;
}
struct mmap_mfn_state {
unsigned long va;
struct vm_area_struct *vma;
domid_t domain;
};
static int mmap_mfn_range(void *data, void *state)
{
struct privcmd_mmap_entry *msg = data;
struct mmap_mfn_state *st = state;
struct vm_area_struct *vma = st->vma;
int rc;
/* Do not allow range to wrap the address space. */
if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
return -EINVAL;
/* Range chunks must be contiguous in va space. */
if ((msg->va != st->va) ||
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
rc = xen_remap_domain_mfn_range(vma,
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
st->domain);
if (rc < 0)
return rc;
st->va += msg->npages << PAGE_SHIFT;
return 0;
}
static long privcmd_ioctl_mmap(void __user *udata)
{
struct privcmd_mmap mmapcmd;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
LIST_HEAD(pagelist);
struct mmap_mfn_state state;
if (!xen_initial_domain())
return -EPERM;
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
rc = gather_array(&pagelist,
mmapcmd.num, sizeof(struct privcmd_mmap_entry),
mmapcmd.entry);
if (rc || list_empty(&pagelist))
goto out;
down_write(&mm->mmap_sem);
{
struct page *page = list_first_entry(&pagelist,
struct page, lru);
struct privcmd_mmap_entry *msg = page_address(page);
vma = find_vma(mm, msg->va);
rc = -EINVAL;
if (!vma || (msg->va != vma->vm_start) ||
!privcmd_enforce_singleshot_mapping(vma))
goto out_up;
}
state.va = vma->vm_start;
state.vma = vma;
state.domain = mmapcmd.dom;
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist,
mmap_mfn_range, &state);
out_up:
up_write(&mm->mmap_sem);
out:
free_page_list(&pagelist);
return rc;
}
struct mmap_batch_state {
domid_t domain;
unsigned long va;
struct vm_area_struct *vma;
int err;
xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
st->vma->vm_page_prot, st->domain) < 0) {
*mfnp |= 0xf0000000U;
st->err++;
}
st->va += PAGE_SIZE;
return 0;
}
static int mmap_return_errors(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
put_user(*mfnp, st->user++);
return 0;
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
int ret;
struct privcmd_mmapbatch m;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long nr_pages;
LIST_HEAD(pagelist);
struct mmap_batch_state state;
if (!xen_initial_domain())
return -EPERM;
if (copy_from_user(&m, udata, sizeof(m)))
return -EFAULT;
nr_pages = m.num;
if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
return -EINVAL;
ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
m.arr);
if (ret || list_empty(&pagelist))
goto out;
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
goto out;
}
state.domain = m.dom;
state.vma = vma;
state.va = m.addr;
state.err = 0;
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_batch_fn, &state);
up_write(&mm->mmap_sem);
if (state.err > 0) {
ret = 0;
state.user = m.arr;
traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist,
mmap_return_errors, &state);
}
out:
free_page_list(&pagelist);
return ret;
}
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
int ret = -ENOSYS;
void __user *udata = (void __user *) data;
switch (cmd) {
case IOCTL_PRIVCMD_HYPERCALL:
ret = privcmd_ioctl_hypercall(udata);
break;
case IOCTL_PRIVCMD_MMAP:
ret = privcmd_ioctl_mmap(udata);
break;
case IOCTL_PRIVCMD_MMAPBATCH:
ret = privcmd_ioctl_mmap_batch(udata);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
vma, vma->vm_start, vma->vm_end,
vmf->pgoff, vmf->virtual_address);
return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
/* Unsupported for auto-translate guests. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return -ENOSYS;
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
return 0;
}
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif
const struct file_operations privcmd_file_ops = {
.unlocked_ioctl = privcmd_ioctl,
.mmap = privcmd_mmap,
};
@@ -12,6 +12,8 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/magic.h>
+#include <linux/mm.h>
+#include <linux/backing-dev.h>

 #include <xen/xen.h>
@@ -22,6 +24,62 @@
 MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");

+static int xenfs_set_page_dirty(struct page *page)
+{
+	return !TestSetPageDirty(page);
+}
+
+static const struct address_space_operations xenfs_aops = {
+	.set_page_dirty = xenfs_set_page_dirty,
+};
+
+static struct backing_dev_info xenfs_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
+{
+	struct inode *ret = new_inode(sb);
+
+	if (ret) {
+		ret->i_mode = mode;
+		ret->i_mapping->a_ops = &xenfs_aops;
+		ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
+		ret->i_uid = ret->i_gid = 0;
+		ret->i_blocks = 0;
+		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+	}
+	return ret;
+}
+
+static struct dentry *xenfs_create_file(struct super_block *sb,
+					struct dentry *parent,
+					const char *name,
+					const struct file_operations *fops,
+					void *data,
+					int mode)
+{
+	struct dentry *dentry;
+	struct inode *inode;
+
+	dentry = d_alloc_name(parent, name);
+	if (!dentry)
+		return NULL;
+
+	inode = xenfs_make_inode(sb, S_IFREG | mode);
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+
+	inode->i_fop = fops;
+	inode->i_private = data;
+
+	d_add(dentry, inode);
+	return dentry;
+}
+
 static ssize_t capabilities_read(struct file *file, char __user *buf,
 				 size_t size, loff_t *off)
 {
@@ -44,10 +102,23 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
 		[1] = {},
 		{ "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
 		{ "capabilities", &capabilities_file_ops, S_IRUGO },
+		{ "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
 		{""},
 	};
+	int rc;

-	return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+	rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+	if (rc < 0)
+		return rc;
+
+	if (xen_initial_domain()) {
+		xenfs_create_file(sb, sb->s_root, "xsd_kva",
+				  &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
+		xenfs_create_file(sb, sb->s_root, "xsd_port",
+				  &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
+	}
+
+	return rc;
 }

 static int xenfs_get_sb(struct file_system_type *fs_type,
@@ -66,11 +137,25 @@ static struct file_system_type xenfs_type = {

 static int __init xenfs_init(void)
 {
-	if (xen_domain())
-		return register_filesystem(&xenfs_type);
+	int err;
+	if (!xen_domain()) {
+		printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
+		return 0;
+	}

-	printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
+	err = register_filesystem(&xenfs_type);
+	if (err) {
+		printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
+		goto out;
+	}

-	return 0;
+	err = bdi_init(&xenfs_backing_dev_info);
+	if (err)
+		unregister_filesystem(&xenfs_type);
+
+ out:
+	return err;
 }

 static void __exit xenfs_exit(void)
...
@@ -2,5 +2,8 @@
 #define _XENFS_XENBUS_H

 extern const struct file_operations xenbus_file_ops;
+extern const struct file_operations privcmd_file_ops;
+extern const struct file_operations xsd_kva_file_ops;
+extern const struct file_operations xsd_port_file_ops;

 #endif /* _XENFS_XENBUS_H */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <xen/page.h>
#include "xenfs.h"
#include "../xenbus/xenbus_comms.h"
static ssize_t xsd_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
const char *str = (const char *)file->private_data;
return simple_read_from_buffer(buf, size, off, str, strlen(str));
}
static int xsd_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static int xsd_kva_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
xen_store_interface);
if (!file->private_data)
return -ENOMEM;
return 0;
}
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
return -EINVAL;
if (remap_pfn_range(vma, vma->vm_start,
virt_to_pfn(xen_store_interface),
size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
const struct file_operations xsd_kva_file_ops = {
.open = xsd_kva_open,
.mmap = xsd_kva_mmap,
.read = xsd_read,
.release = xsd_release,
};
static int xsd_port_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "%d",
xen_store_evtchn);
if (!file->private_data)
return -ENOMEM;
return 0;
}
const struct file_operations xsd_port_file_ops = {
.open = xsd_port_open,
.read = xsd_read,
.release = xsd_release,
};
 header-y += evtchn.h
+header-y += privcmd.h
@@ -186,6 +186,35 @@ struct xen_translate_gpfn_list {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);

+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_memory_map	9
+struct xen_memory_map {
+	/*
+	 * On call the number of entries which can be stored in buffer. On
+	 * return the number of entries which have been stored in
+	 * buffer.
+	 */
+	unsigned int nr_entries;
+
+	/*
+	 * Entries in the buffer are in the same format as returned by the
+	 * BIOS INT 0x15 EAX=0xE820 call.
+	 */
+	GUEST_HANDLE(void) buffer;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_machine_memory_map	10
+
 /*
  * Prevent the balloon driver from changing the memory reservation
...
/******************************************************************************
* privcmd.h
*
* Interface to /proc/xen/privcmd.
*
* Copyright (c) 2003-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __LINUX_PUBLIC_PRIVCMD_H__
#define __LINUX_PUBLIC_PRIVCMD_H__
#include <linux/types.h>
typedef unsigned long xen_pfn_t;
#ifndef __user
#define __user
#endif
struct privcmd_hypercall {
__u64 op;
__u64 arg[5];
};
struct privcmd_mmap_entry {
__u64 va;
__u64 mfn;
__u64 npages;
};
struct privcmd_mmap {
int num;
domid_t dom; /* target domain */
struct privcmd_mmap_entry __user *entry;
};
struct privcmd_mmapbatch {
int num; /* number of pages to populate */
domid_t dom; /* target domain */
__u64 addr; /* virtual address */
xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
};
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
* Return: Value returned from execution of the specified hypercall.
*/
#define IOCTL_PRIVCMD_HYPERCALL \
_IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
#define IOCTL_PRIVCMD_MMAP \
_IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
#define IOCTL_PRIVCMD_MMAPBATCH \
_IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
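For a sense of how this ABI is consumed, here is a hedged userspace sketch that issues a harmless hypercall through the new privcmd node. The /proc/xen mount point, the use of hypercall 17 (__HYPERVISOR_xen_version) with XENVER_version as the smoke test, and compiling directly against this exported header instead of going through libxc are all assumptions for illustration, not something this patch series mandates.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <xen/privcmd.h>	/* exported via the Kbuild change above */

	int main(void)
	{
		/* Assumed test hypercall: __HYPERVISOR_xen_version (17) with
		 * cmd XENVER_version (0) is read-only, so it exercises the
		 * ioctl path without side effects. */
		struct privcmd_hypercall call = {
			.op  = 17,
			.arg = { 0, 0, 0, 0, 0 },
		};
		int fd = open("/proc/xen/privcmd", O_RDWR);	/* xenfs mount assumed */
		long ver;

		if (fd < 0) {
			perror("open /proc/xen/privcmd");
			return 1;
		}

		ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
		if (ver < 0)
			perror("IOCTL_PRIVCMD_HYPERCALL");
		else
			printf("Xen version %ld.%ld\n", ver >> 16, ver & 0xffff);

		close(fd);
		return 0;
	}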
@@ -23,4 +23,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,

 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);

+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       unsigned long mfn, int nr,
+			       pgprot_t prot, unsigned domid);
+
 #endif /* INCLUDE_XEN_OPS_H */