Commit bf98ecbb authored by Linus Torvalds

Merge tag 'for-linus-5.16b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - a series to speed up the boot of Xen PV guests

 - some cleanups in Xen related code

 - replacement of license texts with the appropriate SPDX headers and
   fixing of wrong SPDX headers in Xen header files

 - a small series making paravirtualized interrupt masking much simpler
   and at the same time silencing objtool complaints (sketched below)

 - a fix for Xen ballooning hogging workqueues for too long

 - enablement of the Xen pciback driver for Arm

 - some further small fixes/enhancements
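The interrupt-masking rework shows up concretely in the arch/x86/xen/irq.c
hunk further down. As a rough sketch (field names taken from that hunk, not a
complete initializer), the initial PV IRQ ops become plain stubs because they
are only ever called while interrupts are still off:

    .save_fl     = __PV_IS_CALLEE_SAVE(paravirt_ret0),  /* flags read back as "masked" */
    .irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),   /* already masked: nothing to do */
    .irq_enable  = __PV_IS_CALLEE_SAVE(paravirt_BUG),   /* must not happen this early */

The real Xen implementations are only installed later, once the per-cpu
vcpu_info area has been registered.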

* tag 'for-linus-5.16b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (22 commits)
  xen/balloon: fix unused-variable warning
  xen/balloon: rename alloc/free_xenballooned_pages
  xen/balloon: add late_initcall_sync() for initial ballooning done
  x86/xen: remove 32-bit awareness from startup_xen
  xen: remove highmem remnants
  xen: allow pv-only hypercalls only with CONFIG_XEN_PV
  x86/xen: remove 32-bit pv leftovers
  xen-pciback: allow compiling on other archs than x86
  x86/xen: switch initial pvops IRQ functions to dummy ones
  x86/xen: remove xen_have_vcpu_info_placement flag
  x86/pvh: add prototype for xen_pvh_init()
  xen: Fix implicit type conversion
  xen: fix wrong SPDX headers of Xen related headers
  xen/pvcalls-back: Remove redundant 'flush_workqueue()' calls
  x86/xen: Remove redundant irq_enter/exit() invocations
  xen-pciback: Fix return in pm_ctrl_init()
  xen/x86: restrict PV Dom0 identity mapping
  xen/x86: there's no highmem anymore in PV mode
  xen/x86: adjust handling of the L3 user vsyscall special page table
  xen/x86: adjust xen_set_fixmap()
  ...
parents 4287af35 501586ea
@@ -6379,6 +6379,13 @@
improve timer resolution at the expense of processing
more timer interrupts.
xen.balloon_boot_timeout= [XEN]
The time (in seconds) to wait before giving up to boot
in case initial ballooning fails to free enough memory.
Applies only when running as HVM or PVH guest and
started with less memory configured than allowed at
max. Default is 180.
xen.event_eoi_delay= [XEN]
How long to delay EOI handling in case of event
storms (jiffies). Default is 10.
...
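A purely illustrative example: an HVM or PVH guest that is started with less
memory than its configured maximum could be given more time for the initial
balloon-down by booting with

    xen.balloon_boot_timeout=300

The value is made up; the parameter and its 180-second default are exactly as
documented in the hunk above.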
@@ -442,7 +442,6 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
-EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
...
@@ -88,7 +88,6 @@ HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
-HYPERCALL1(tmem_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
...
@@ -80,7 +80,6 @@ HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
-HYPERCALL1(tmem_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
...
@@ -577,7 +577,9 @@ void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
+void paravirt_BUG(void);
u64 _paravirt_ident_64(u64);
+unsigned long paravirt_ret0(void);
#define paravirt_nop ((void *)_paravirt_nop)
...
@@ -248,6 +248,7 @@ privcmd_call(unsigned int call,
return res;
}
#ifdef CONFIG_XEN_PV
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
@@ -280,6 +281,107 @@ HYPERVISOR_callback_op(int cmd, void *arg)
return _hypercall2(int, callback_op, cmd, arg);
}
static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
return _hypercall2(int, set_debugreg, reg, value);
}
static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
return _hypercall1(unsigned long, get_debugreg, reg);
}
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
return _hypercall2(int, update_descriptor, ma, desc);
}
static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
unsigned long flags)
{
return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
}
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
return _hypercall2(int, set_segment_base, reg, value);
}
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = set;
trace_xen_mc_entry(mcl, 1);
}
static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->args[0] = va;
mcl->args[1] = new_val.pte;
mcl->args[2] = flags;
trace_xen_mc_entry(mcl, 3);
}
static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
mcl->op = __HYPERVISOR_update_descriptor;
mcl->args[0] = maddr;
mcl->args[1] = *(unsigned long *)&desc;
trace_xen_mc_entry(mcl, 2);
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
int count, int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmu_update;
mcl->args[0] = (unsigned long)req;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmuext_op;
mcl->args[0] = (unsigned long)op;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
unsigned long ss, unsigned long esp)
{
mcl->op = __HYPERVISOR_stack_switch;
mcl->args[0] = ss;
mcl->args[1] = esp;
trace_xen_mc_entry(mcl, 2);
}
#endif
static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
@@ -308,26 +410,6 @@ HYPERVISOR_platform_op(struct xen_platform_op *op)
return _hypercall1(int, platform_op, op);
}
static __always_inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
return _hypercall2(int, set_debugreg, reg, value);
}
static __always_inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
return _hypercall1(unsigned long, get_debugreg, reg);
}
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
if (sizeof(u64) == sizeof(long))
return _hypercall2(int, update_descriptor, ma, desc);
return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}
static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
@@ -340,18 +422,6 @@ HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
return _hypercall2(int, multicall, call_list, nr_calls);
}
static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
unsigned long flags)
{
if (sizeof(new_val) == sizeof(long))
return _hypercall3(int, update_va_mapping, va,
new_val.pte, flags);
else
return _hypercall4(int, update_va_mapping, va,
new_val.pte, new_val.pte >> 32, flags);
}
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
@@ -394,14 +464,6 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
return _hypercall2(int, set_segment_base, reg, value);
}
#endif
static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
@@ -422,13 +484,6 @@ HYPERVISOR_hvm_op(int op, void *arg)
return _hypercall2(unsigned long, hvm_op, op, arg);
}
static inline int
HYPERVISOR_tmem_op(
struct tmem_op *op)
{
return _hypercall1(int, tmem_op, op);
}
static inline int
HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
{
@@ -446,88 +501,4 @@ HYPERVISOR_dm_op(
return ret;
}
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = set;
trace_xen_mc_entry(mcl, 1);
}
static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->args[0] = va;
if (sizeof(new_val) == sizeof(long)) {
mcl->args[1] = new_val.pte;
mcl->args[2] = flags;
} else {
mcl->args[1] = new_val.pte;
mcl->args[2] = new_val.pte >> 32;
mcl->args[3] = flags;
}
trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}
static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
mcl->op = __HYPERVISOR_update_descriptor;
if (sizeof(maddr) == sizeof(long)) {
mcl->args[0] = maddr;
mcl->args[1] = *(unsigned long *)&desc;
} else {
u32 *p = (u32 *)&desc;
mcl->args[0] = maddr;
mcl->args[1] = maddr >> 32;
mcl->args[2] = *p++;
mcl->args[3] = *p;
}
trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
int count, int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmu_update;
mcl->args[0] = (unsigned long)req;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmuext_op;
mcl->args[0] = (unsigned long)op;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
unsigned long ss, unsigned long esp)
{
mcl->op = __HYPERVISOR_stack_switch;
mcl->args[0] = ss;
mcl->args[1] = esp;
trace_xen_mc_entry(mcl, 2);
}
#endif /* _ASM_X86_XEN_HYPERCALL_H */
@@ -62,4 +62,8 @@ void xen_arch_register_cpu(int num);
void xen_arch_unregister_cpu(int num);
#endif
+#ifdef CONFIG_PVH
+void __init xen_pvh_init(struct boot_params *boot_params);
+#endif
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
@@ -22,25 +22,6 @@ static inline int __init pci_xen_initial_domain(void)
return -1;
}
#endif
#ifdef CONFIG_XEN_DOM0
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
}
static inline int xen_register_device_domain_owner(struct pci_dev *dev,
uint16_t domain)
{
return -1;
}
static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
return -1;
}
#endif
#if defined(CONFIG_PCI_MSI)
#if defined(CONFIG_PCI_XEN)
...
@@ -46,6 +46,17 @@ asm (".pushsection .entry.text, \"ax\"\n"
".type _paravirt_nop, @function\n\t"
".popsection");
/* stub always returning 0. */
asm (".pushsection .entry.text, \"ax\"\n"
".global paravirt_ret0\n"
"paravirt_ret0:\n\t"
"xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
"ret\n\t"
".size paravirt_ret0, . - paravirt_ret0\n\t"
".type paravirt_ret0, @function\n\t"
".popsection");
void __init default_banner(void)
{
printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -53,7 +64,7 @@ void __init default_banner(void)
}
/* Undefined instruction for dealing with missing ops pointers. */
-static void paravirt_BUG(void)
+noinstr void paravirt_BUG(void)
{
BUG();
}
...
@@ -23,6 +23,7 @@
#include <xen/features.h>
#include <xen/events.h>
#include <xen/pci.h>
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
@@ -585,78 +586,3 @@ int __init pci_xen_initial_domain(void)
}
#endif
#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
struct pci_dev *dev;
struct list_head list;
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
list_for_each_entry(owner, &dev_domain_list, list) {
if (owner->dev == dev)
return owner;
}
return NULL;
}
int xen_find_device_domain_owner(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
int domain = -ENODEV;
spin_lock(&dev_domain_list_spinlock);
owner = find_device(dev);
if (owner)
domain = owner->domain;
spin_unlock(&dev_domain_list_spinlock);
return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
struct xen_device_domain_owner *owner;
owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
if (!owner)
return -ENODEV;
spin_lock(&dev_domain_list_spinlock);
if (find_device(dev)) {
spin_unlock(&dev_domain_list_spinlock);
kfree(owner);
return -EEXIST;
}
owner->domain = domain;
owner->dev = dev;
list_add_tail(&owner->list, &dev_domain_list);
spin_unlock(&dev_domain_list_spinlock);
return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
spin_lock(&dev_domain_list_spinlock);
owner = find_device(dev);
if (!owner) {
spin_unlock(&dev_domain_list_spinlock);
return -ENODEV;
}
list_del(&owner->list);
spin_unlock(&dev_domain_list_spinlock);
kfree(owner);
return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif /* CONFIG_XEN_DOM0 */
@@ -31,25 +31,10 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* Pointer to the xen_vcpu_info structure or
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
-* but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
-* to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
-* acknowledge pending events.
-* Also more subtly it is used by the patched version of irq enable/disable
-* e.g. xen_irq_enable_direct and xen_iret in PV mode.
-*
-* The desire to be able to do those mask/unmask operations as a single
-* instruction by using the per-cpu offset held in %gs is the real reason
-* vcpu info is in a per-cpu pointer and the original reason for this
-* hypercall.
-*
+* but during boot it is switched to point to xen_vcpu_info.
+* The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
/*
* Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
* hypercall. This can be used both in PV and PVHVM mode. The structure
* overrides the default per_cpu(xen_vcpu, cpu) value.
*/
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
@@ -84,21 +69,6 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
/*
* Flag to determine whether vcpu info placement is available on all
* VCPUs. We assume it is to start with, and then set it to zero on
* the first failure. This is because it can succeed on some VCPUs
* and not others, since it can involve hypervisor memory allocation,
* or because the guest failed to guarantee all the appropriate
* constraints on all VCPUs (ie buffer can't cross a page boundary).
*
* Note that any particular CPU may be using a placed vcpu structure,
* but we can only optimise if the all are.
*
* 0: not available, 1: available
*/
int xen_have_vcpu_info_placement = 1;
static int xen_cpu_up_online(unsigned int cpu)
{
xen_init_lock_cpu(cpu);
@@ -124,10 +94,8 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
return rc >= 0 ? 0 : rc;
}
-static int xen_vcpu_setup_restore(int cpu)
+static void xen_vcpu_setup_restore(int cpu)
{
int rc = 0;
/* Any per_cpu(xen_vcpu) is stale, so reset it */
xen_vcpu_info_reset(cpu);
@@ -136,11 +104,8 @@ static int xen_vcpu_setup_restore(int cpu)
* be handled by hotplug.
*/
if (xen_pv_domain() ||
-(xen_hvm_domain() && cpu_online(cpu))) {
-rc = xen_vcpu_setup(cpu);
+(xen_hvm_domain() && cpu_online(cpu)))
+xen_vcpu_setup(cpu);
}
return rc;
}
/*
@@ -150,7 +115,7 @@ static int xen_vcpu_setup_restore(int cpu)
*/
void xen_vcpu_restore(void)
{
-int cpu, rc;
+int cpu;
for_each_possible_cpu(cpu) {
bool other_cpu = (cpu != smp_processor_id());
@@ -170,20 +135,9 @@ void xen_vcpu_restore(void)
if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_runstate_info(cpu);
-rc = xen_vcpu_setup_restore(cpu);
-if (rc)
-pr_emerg_once("vcpu restore failed for cpu=%d err=%d. "
-"System will hang.\n", cpu, rc);
-/*
-* In case xen_vcpu_setup_restore() fails, do not bring up the
-* VCPU. This helps us avoid the resulting OOPS when the VCPU
-* accesses pvclock_vcpu_time via xen_vcpu (which is NULL.)
-* Note that this does not improve the situation much -- now the
-* VM hangs instead of OOPSing -- with the VCPUs that did not
-* fail, spinning in stop_machine(), waiting for the failed
-* VCPUs to come up.
-*/
-if (other_cpu && is_up && (rc == 0) &&
+xen_vcpu_setup_restore(cpu);
+if (other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
BUG();
}
@@ -200,7 +154,7 @@ void xen_vcpu_info_reset(int cpu)
}
}
-int xen_vcpu_setup(int cpu)
+void xen_vcpu_setup(int cpu)
{
struct vcpu_register_vcpu_info info;
int err;
@@ -221,44 +175,26 @@ int xen_vcpu_setup(int cpu)
*/
if (xen_hvm_domain()) {
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
-return 0;
+return;
}
-if (xen_have_vcpu_info_placement) {
-vcpup = &per_cpu(xen_vcpu_info, cpu);
-info.mfn = arbitrary_virt_to_mfn(vcpup);
-info.offset = offset_in_page(vcpup);
-/*
-* Check to see if the hypervisor will put the vcpu_info
-* structure where we want it, which allows direct access via
-* a percpu-variable.
-* N.B. This hypercall can _only_ be called once per CPU.
-* Subsequent calls will error out with -EINVAL. This is due to
-* the fact that hypervisor has no unregister variant and this
-* hypercall does not allow to over-write info.mfn and
-* info.offset.
-*/
-err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
-xen_vcpu_nr(cpu), &info);
-if (err) {
-pr_warn_once("register_vcpu_info failed: cpu=%d err=%d\n",
-cpu, err);
-xen_have_vcpu_info_placement = 0;
-} else {
-/*
-* This cpu is using the registered vcpu info, even if
-* later ones fail to.
-*/
-per_cpu(xen_vcpu, cpu) = vcpup;
-}
-}
-if (!xen_have_vcpu_info_placement)
-xen_vcpu_info_reset(cpu);
-return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
+vcpup = &per_cpu(xen_vcpu_info, cpu);
+info.mfn = arbitrary_virt_to_mfn(vcpup);
+info.offset = offset_in_page(vcpup);
+/*
+* N.B. This hypercall can _only_ be called once per CPU.
+* Subsequent calls will error out with -EINVAL. This is due to
+* the fact that hypervisor has no unregister variant and this
+* hypercall does not allow to over-write info.mfn and
+* info.offset.
+*/
+err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
+&info);
+if (err)
+panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);
+per_cpu(xen_vcpu, cpu) = vcpup;
}
void __init xen_banner(void)
...
@@ -163,9 +163,9 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
else
per_cpu(xen_vcpu_id, cpu) = cpu;
-rc = xen_vcpu_setup(cpu);
-if (rc || !xen_have_vector_callback)
-return rc;
+xen_vcpu_setup(cpu);
+if (!xen_have_vector_callback)
+return 0;
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
...
@@ -27,7 +27,6 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
@@ -993,31 +992,13 @@ void __init xen_setup_vcpu_info_placement(void)
for_each_possible_cpu(cpu) {
/* Set up direct vCPU id mapping for PV guests. */
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
/*
* xen_vcpu_setup(cpu) can fail -- in which case it
* falls back to the shared_info version for cpus
* where xen_vcpu_nr(cpu) < MAX_VIRT_CPUS.
*
* xen_cpu_up_prepare_pv() handles the rest by failing
* them in hotplug.
*/
(void) xen_vcpu_setup(cpu);
}
-/*
-* xen_vcpu_setup managed to place the vcpu_info within the
-* percpu area for all cpus, so make use of it.
-*/
-if (xen_have_vcpu_info_placement) {
-pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
-pv_ops.irq.irq_disable =
-__PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
-pv_ops.irq.irq_enable =
-__PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
-pv_ops.mmu.read_cr2 =
-__PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
-}
+pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
+pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
}
static const struct pv_info xen_info __initconst = {
@@ -1247,12 +1228,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
__supported_pte_mask &= ~_PAGE_GLOBAL;
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/*
* Prevent page tables from being allocated in highmem, even
* if CONFIG_HIGHPTE is enabled.
*/
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
...
@@ -24,60 +24,6 @@ noinstr void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
asmlinkage __visible noinstr unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
unsigned long flags;
vcpu = this_cpu_read(xen_vcpu);
/* flag has opposite sense of mask */
flags = !vcpu->evtchn_upcall_mask;
/* convert to IF type flag
-0 -> 0x00000000
-1 -> 0xffffffff
*/
return (-flags) & X86_EFLAGS_IF;
}
__PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl, ".noinstr.text");
asmlinkage __visible noinstr void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
make sure we're don't switch CPUs between getting the vcpu
pointer and updating the mask. */
preempt_disable();
this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
preempt_enable_no_resched();
}
__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable, ".noinstr.text");
asmlinkage __visible noinstr void xen_irq_enable(void)
{
struct vcpu_info *vcpu;
/*
* We may be preempted as soon as vcpu->evtchn_upcall_mask is
* cleared, so disable preemption to ensure we check for
* events on the VCPU we are still running on.
*/
preempt_disable();
vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = 0;
/* Doesn't matter if we get preempted here, because any
pending event will get dealt with anyway. */
barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback();
preempt_enable();
}
__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");
static void xen_safe_halt(void)
{
/* Blocking includes an implicit local_irq_enable(). */
@@ -96,10 +42,10 @@ static void xen_halt(void)
static const typeof(pv_ops) xen_irq_ops __initconst = {
.irq = {
-.save_fl = PV_CALLEE_SAVE(xen_save_fl),
-.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
-.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+/* Initial interrupt flag handling only called while interrupts off. */
+.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
+.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
+.irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
.safe_halt = xen_safe_halt,
.halt = xen_halt,
...
@@ -41,7 +41,6 @@
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
@@ -86,8 +85,10 @@
#include "mmu.h"
#include "debugfs.h"
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif
/*
* Protects atomic reservation decrease/increase against concurrent increases.
@@ -241,9 +242,11 @@ static void xen_set_pmd(pmd_t *ptr, pmd_t val)
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
-void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
+void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
-set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
+if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
UVMF_INVLPG))
BUG();
}
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
@@ -789,7 +792,9 @@ static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
static void __init xen_after_bootmem(void)
{
static_branch_enable(&xen_struct_pages_ready);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
@@ -1192,6 +1197,13 @@ static void __init xen_pagetable_p2m_setup(void)
static void __init xen_pagetable_init(void)
{
/*
* The majority of further PTE writes is to pagetables already
* announced as such to Xen. Hence it is more efficient to use
* hypercalls for these updates.
*/
pv_ops.mmu.set_pte = __xen_set_pte;
paging_init();
xen_post_allocator_init();
@@ -1421,10 +1433,18 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
*
* Many of these PTE updates are done on unpinned and writable pages
* and doing a hypercall for these is unnecessary and expensive. At
-* this point it is not possible to tell if a page is pinned or not,
-* so always write the PTE directly and rely on Xen trapping and
+* this point it is rarely possible to tell if a page is pinned, so
+* mostly write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
if (unlikely(is_early_ioremap_ptep(ptep)))
__xen_set_pte(ptep, pte);
else
native_set_pte(ptep, pte);
}
__visible pte_t xen_make_pte_init(pteval_t pte)
{
unsigned long pfn;
@@ -1446,11 +1466,6 @@ __visible pte_t xen_make_pte_init(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
__xen_set_pte(ptep, pte);
}
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
@@ -1750,7 +1765,6 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
@@ -1767,6 +1781,13 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* Pin user vsyscall L3 */
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
#endif
/*
* At this stage there can be no user pgd, and no page structure to
* attach it to, so make sure we just set kernel pgd.
@@ -1999,6 +2020,7 @@ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
pte_t pte;
unsigned long vaddr;
phys >>= PAGE_SHIFT;
@@ -2039,15 +2061,15 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
break;
}
-__native_set_fixmap(idx, pte);
+vaddr = __fix_to_virt(idx);
if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
BUG();
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* Replicate changes to map the vsyscall page into the user
pagetable vsyscall mapping. */
-if (idx == VSYSCALL_PAGE) {
-unsigned long vaddr = __fix_to_virt(idx);
+if (idx == VSYSCALL_PAGE)
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
-}
#endif
}
...
@@ -306,10 +306,6 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
BUG();
}
/* Update kernel mapping, but not for highmem. */
if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(mfn, PAGE_KERNEL), 0)) {
WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
@@ -429,13 +425,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
}
/*
-* If the PFNs are currently mapped, the VA mapping also needs
-* to be updated to be 1:1.
+* If the PFNs are currently mapped, their VA mappings need to be
+* zapped.
*/
for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
(void)HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
-mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+native_make_pte(0), 0);
return remap_pfn;
}
...
@@ -121,34 +121,10 @@ int xen_smp_intr_init(unsigned int cpu)
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
-int cpu, rc, count = 0;
if (xen_hvm_domain())
native_smp_cpus_done(max_cpus);
else
calculate_max_logical_packages();
if (xen_have_vcpu_info_placement)
return;
for_each_online_cpu(cpu) {
if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
continue;
rc = remove_cpu(cpu);
if (rc == 0) {
/*
* Reset vcpu_info so this cpu cannot be onlined again.
*/
xen_vcpu_info_reset(cpu);
count++;
} else {
pr_warn("%s: failed to bring CPU %d down, error %d\n",
__func__, cpu, rc);
}
}
WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}
void xen_smp_send_reschedule(int cpu)
@@ -268,20 +244,16 @@ void xen_send_IPI_allbutself(int vector)
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
-irq_enter();
generic_smp_call_function_interrupt();
inc_irq_stat(irq_call_count);
-irq_exit();
return IRQ_HANDLED;
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
-irq_enter();
generic_smp_call_function_single_interrupt();
inc_irq_stat(irq_call_count);
-irq_exit();
return IRQ_HANDLED;
}
@@ -458,10 +458,8 @@ static void xen_pv_stop_other_cpus(int wait)
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
-irq_enter();
irq_work_run();
inc_irq_stat(apic_irq_work_irqs);
-irq_exit();
return IRQ_HANDLED;
}
...
@@ -45,13 +45,13 @@ SYM_CODE_START(startup_xen)
/* Clear .bss */
xor %eax,%eax
-mov $__bss_start, %_ASM_DI
-mov $__bss_stop, %_ASM_CX
-sub %_ASM_DI, %_ASM_CX
-shr $__ASM_SEL(2, 3), %_ASM_CX
-rep __ASM_SIZE(stos)
-mov %_ASM_SI, xen_start_info
+mov $__bss_start, %rdi
+mov $__bss_stop, %rcx
+sub %rdi, %rcx
+shr $3, %rcx
+rep stosq
+mov %rsi, xen_start_info
mov initial_stack(%rip), %rsp
/* Set up %gs.
...
@@ -76,9 +76,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
bool xen_vcpu_stolen(int vcpu);
-extern int xen_have_vcpu_info_placement;
-int xen_vcpu_setup(int cpu);
+void xen_vcpu_setup(int cpu);
void xen_vcpu_info_reset(int cpu);
void xen_setup_vcpu_info_placement(void);
...
@@ -181,10 +181,34 @@ config SWIOTLB_XEN
select DMA_OPS
select SWIOTLB
config XEN_PCI_STUB
bool
config XEN_PCIDEV_STUB
tristate "Xen PCI-device stub driver"
depends on PCI && !X86 && XEN
depends on XEN_BACKEND
select XEN_PCI_STUB
default m
help
The PCI device stub driver provides limited version of the PCI
device backend driver without para-virtualized support for guests.
If you select this to be a module, you will need to make sure no
other driver has bound to the device(s) you want to make visible to
other guests.
The "hide" parameter (only applicable if backend driver is compiled
into the kernel) allows you to bind the PCI devices to this module
from the default device drivers. The argument is the list of PCI BDFs:
xen-pciback.hide=(03:00.0)(04:00.0)
If in doubt, say m.
config XEN_PCIDEV_BACKEND
tristate "Xen PCI-device backend driver"
depends on PCI && X86 && XEN
depends on XEN_BACKEND
+select XEN_PCI_STUB
default m
help
The PCI device backend driver allows the kernel to export arbitrary
...
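As a hypothetical configuration fragment (symbol names from the hunk above,
the exact combination is only illustrative), an Arm backend domain that wants
device hiding without the full para-virtualized backend would build the new
stub driver instead:

    CONFIG_XEN_BACKEND=y
    CONFIG_XEN_PCIDEV_STUB=m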
@@ -24,7 +24,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM_GUEST) += platform-pci.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
-obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
+obj-$(CONFIG_XEN_PCI_STUB) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
...
@@ -58,6 +58,7 @@
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <asm/page.h>
#include <asm/tlb.h>
@@ -73,9 +74,14 @@
#include <xen/page.h>
#include <xen/mem-reservation.h>
-static int xen_hotplug_unpopulated;
+#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."
static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;
static struct ctl_table balloon_table[] = {
{
@@ -108,6 +114,8 @@ static struct ctl_table xen_root[] = {
{ }
};
#else
#define xen_hotplug_unpopulated 0
#endif
/*
@@ -125,12 +133,12 @@ static struct ctl_table xen_root[] = {
* BP_ECANCELED: error, balloon operation canceled.
*/
-enum bp_state {
+static enum bp_state {
BP_DONE,
BP_WAIT,
BP_EAGAIN,
BP_ECANCELED
-};
+} balloon_state = BP_DONE;
/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
@@ -199,18 +207,15 @@ static struct page *balloon_next_page(struct page *page)
return list_entry(next, struct page, lru);
}
-static enum bp_state update_schedule(enum bp_state state)
+static void update_schedule(void)
{
-if (state == BP_WAIT)
-return BP_WAIT;
-if (state == BP_ECANCELED)
-return BP_ECANCELED;
-if (state == BP_DONE) {
+if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
+return;
+if (balloon_state == BP_DONE) {
balloon_stats.schedule_delay = 1;
balloon_stats.retry_count = 1;
-return BP_DONE;
+return;
}
++balloon_stats.retry_count;
@@ -219,7 +224,8 @@ static enum bp_state update_schedule(enum bp_state state)
balloon_stats.retry_count > balloon_stats.max_retry_count) {
balloon_stats.schedule_delay = 1;
balloon_stats.retry_count = 1;
-return BP_ECANCELED;
+balloon_state = BP_ECANCELED;
+return;
}
balloon_stats.schedule_delay <<= 1;
@@ -227,7 +233,7 @@ static enum bp_state update_schedule(enum bp_state state)
if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
-return BP_EAGAIN;
+balloon_state = BP_EAGAIN;
}
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
@@ -494,9 +500,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
* Stop waiting if either state is BP_DONE and ballooning action is
* needed, or if the credit has changed while state is not BP_DONE.
*/
-static bool balloon_thread_cond(enum bp_state state, long credit)
+static bool balloon_thread_cond(long credit)
{
-if (state == BP_DONE)
+if (balloon_state == BP_DONE)
credit = 0;
return current_credit() != credit || kthread_should_stop();
@@ -510,13 +516,12 @@ static bool balloon_thread_cond(enum bp_state state, long credit)
*/
static int balloon_thread(void *unused)
{
-enum bp_state state = BP_DONE;
long credit;
unsigned long timeout;
set_freezable();
for (;;) {
-switch (state) {
+switch (balloon_state) {
case BP_DONE:
case BP_ECANCELED:
timeout = 3600 * HZ;
@@ -532,7 +537,7 @@ static int balloon_thread(void *unused)
credit = current_credit();
wait_event_freezable_timeout(balloon_thread_wq,
-balloon_thread_cond(state, credit), timeout);
+balloon_thread_cond(credit), timeout);
if (kthread_should_stop())
return 0;
@@ -543,22 +548,23 @@ static int balloon_thread(void *unused)
if (credit > 0) {
if (balloon_is_inflated())
-state = increase_reservation(credit);
+balloon_state = increase_reservation(credit);
else
-state = reserve_additional_memory();
+balloon_state = reserve_additional_memory();
}
if (credit < 0) {
long n_pages;
n_pages = min(-credit, si_mem_available());
-state = decrease_reservation(n_pages, GFP_BALLOON);
-if (state == BP_DONE && n_pages != -credit &&
+balloon_state = decrease_reservation(n_pages,
+GFP_BALLOON);
+if (balloon_state == BP_DONE && n_pages != -credit &&
n_pages < totalreserve_pages)
-state = BP_EAGAIN;
+balloon_state = BP_EAGAIN;
}
-state = update_schedule(state);
+update_schedule();
mutex_unlock(&balloon_mutex);
@@ -575,7 +581,8 @@ void balloon_set_new_target(unsigned long target)
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
-static int add_ballooned_pages(int nr_pages)
+#ifndef CONFIG_XEN_UNPOPULATED_ALLOC
+static int add_ballooned_pages(unsigned int nr_pages)
{
enum bp_state st;
@@ -603,14 +610,14 @@ static int add_ballooned_pages(int nr_pages)
}
/**
-* alloc_xenballooned_pages - get pages that have been ballooned out
+* xen_alloc_unpopulated_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
* @return 0 on success, error otherwise
*/
-int alloc_xenballooned_pages(int nr_pages, struct page **pages)
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
-int pgno = 0;
+unsigned int pgno = 0;
struct page *page;
int ret;
@@ -645,7 +652,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
return 0;
out_undo:
mutex_unlock(&balloon_mutex);
-free_xenballooned_pages(pgno, pages);
+xen_free_unpopulated_pages(pgno, pages);
/*
* NB: free_xenballooned_pages will only subtract pgno pages, but since
* target_unpopulated is incremented with nr_pages at the start we need
@@ -654,16 +661,16 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
-EXPORT_SYMBOL(alloc_xenballooned_pages);
+EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
/**
-* free_xenballooned_pages - return pages retrieved with get_ballooned_pages
+* xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages
* @nr_pages: Number of pages
* @pages: pages to return
*/
-void free_xenballooned_pages(int nr_pages, struct page **pages)
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
-int i;
+unsigned int i;
mutex_lock(&balloon_mutex);
@@ -680,9 +687,9 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
mutex_unlock(&balloon_mutex);
}
-EXPORT_SYMBOL(free_xenballooned_pages);
+EXPORT_SYMBOL(xen_free_unpopulated_pages);
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
+#if defined(CONFIG_XEN_PV)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -705,6 +712,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif
#endif
static int __init balloon_init(void)
{
@@ -765,3 +773,38 @@ static int __init balloon_init(void)
return 0;
}
subsys_initcall(balloon_init);
static int __init balloon_wait_finish(void)
{
long credit, last_credit = 0;
unsigned long last_changed = 0;
if (!xen_domain())
return -ENODEV;
/* PV guests don't need to wait. */
if (xen_pv_domain() || !current_credit())
return 0;
pr_notice("Waiting for initial ballooning down having finished.\n");
while ((credit = current_credit()) < 0) {
if (credit != last_credit) {
last_changed = jiffies;
last_credit = credit;
}
if (balloon_state == BP_ECANCELED) {
pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
-credit);
if (jiffies - last_changed >= HZ * balloon_boot_timeout)
panic("Initial ballooning failed!\n");
}
schedule_timeout_interruptible(HZ / 10);
}
pr_notice("Initial ballooning down finished.\n");
return 0;
}
late_initcall_sync(balloon_wait_finish);
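A minimal usage sketch of the renamed allocation interface (hypothetical
caller, error handling trimmed; the signatures are exactly the ones in the
hunk above):

    struct page *pages[4];

    /* get frames that are ballooned out / unpopulated */
    if (!xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages)) {
        /* ... map foreign grants or device memory into them ... */
        xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
    }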
@@ -35,6 +35,7 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
int ret;
BUG_ON(!page);
@@ -46,16 +47,10 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
set_phys_to_machine(pfn, frames[i]);
-/* Link back into the page tables if not highmem. */
-if (!PageHighMem(page)) {
-int ret;
-ret = HYPERVISOR_update_va_mapping(
-(unsigned long)__va(pfn << PAGE_SHIFT),
-mfn_pte(frames[i], PAGE_KERNEL),
-0);
-BUG_ON(ret);
-}
+ret = HYPERVISOR_update_va_mapping(
+(unsigned long)__va(pfn << PAGE_SHIFT),
+mfn_pte(frames[i], PAGE_KERNEL), 0);
+BUG_ON(ret);
}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
@@ -68,6 +63,7 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
for (i = 0; i < count; i++) {
struct page *page = pages[i];
unsigned long pfn = page_to_pfn(page);
int ret;
/*
* We don't support PV MMU when Linux and Xen are using
@@ -75,14 +71,11 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-if (!PageHighMem(page)) {
-int ret;
-ret = HYPERVISOR_update_va_mapping(
-(unsigned long)__va(pfn << PAGE_SHIFT),
-__pte_ma(0), 0);
-BUG_ON(ret);
-}
+ret = HYPERVISOR_update_va_mapping(
+(unsigned long)__va(pfn << PAGE_SHIFT),
+__pte_ma(0), 0);
+BUG_ON(ret);
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
}
...
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -254,3 +255,78 @@ static int xen_mcfg_late(void)
return 0;
}
#endif
#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
struct pci_dev *dev;
struct list_head list;
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
list_for_each_entry(owner, &dev_domain_list, list) {
if (owner->dev == dev)
return owner;
}
return NULL;
}
int xen_find_device_domain_owner(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
int domain = -ENODEV;
spin_lock(&dev_domain_list_spinlock);
owner = find_device(dev);
if (owner)
domain = owner->domain;
spin_unlock(&dev_domain_list_spinlock);
return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
struct xen_device_domain_owner *owner;
owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
if (!owner)
return -ENODEV;
spin_lock(&dev_domain_list_spinlock);
if (find_device(dev)) {
spin_unlock(&dev_domain_list_spinlock);
kfree(owner);
return -EEXIST;
}
owner->domain = domain;
owner->dev = dev;
list_add_tail(&owner->list, &dev_domain_list);
spin_unlock(&dev_domain_list_spinlock);
return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
struct xen_device_domain_owner *owner;
spin_lock(&dev_domain_list_spinlock);
owner = find_device(dev);
if (!owner) {
spin_unlock(&dev_domain_list_spinlock);
return -ENODEV;
}
list_del(&owner->list);
spin_unlock(&dev_domain_list_spinlock);
kfree(owner);
return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif
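The three exported helpers above maintain a small spinlock-protected list that maps a struct pci_dev to the domain owning it. A hedged usage sketch follows; the caller function and the domain id are hypothetical, and only the xen_*_device_domain_owner() calls come from the code above.

#include <linux/pci.h>
#include <linux/printk.h>
#include <xen/pci.h>

/* Hypothetical caller: hand a device to domain 5, query it, then release it. */
static int example_assign_to_domain(struct pci_dev *dev)
{
	int err;

	err = xen_register_device_domain_owner(dev, 5);
	if (err)
		return err;			/* -EEXIST if already owned */

	pr_info("owner domain: %d\n", xen_find_device_domain_owner(dev));

	return xen_unregister_device_domain_owner(dev);	/* -ENODEV if not found */
}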
...@@ -465,7 +465,6 @@ static int pvcalls_back_release_passive(struct xenbus_device *dev, ...@@ -465,7 +465,6 @@ static int pvcalls_back_release_passive(struct xenbus_device *dev,
write_unlock_bh(&mappass->sock->sk->sk_callback_lock); write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
} }
sock_release(mappass->sock); sock_release(mappass->sock);
flush_workqueue(mappass->wq);
destroy_workqueue(mappass->wq); destroy_workqueue(mappass->wq);
kfree(mappass); kfree(mappass);
......
...@@ -450,7 +450,7 @@ static struct acpi_processor_performance __percpu *acpi_perf_data; ...@@ -450,7 +450,7 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
static void free_acpi_perf_data(void) static void free_acpi_perf_data(void)
{ {
unsigned int i; int i;
/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
for_each_possible_cpu(i) for_each_possible_cpu(i)
...@@ -462,7 +462,7 @@ static void free_acpi_perf_data(void) ...@@ -462,7 +462,7 @@ static void free_acpi_perf_data(void)
static int xen_upload_processor_pm_data(void) static int xen_upload_processor_pm_data(void)
{ {
struct acpi_processor *pr_backup = NULL; struct acpi_processor *pr_backup = NULL;
unsigned int i; int i;
int rc = 0; int rc = 0;
pr_info("Uploading Xen processor PM info\n"); pr_info("Uploading Xen processor PM info\n");
...@@ -518,7 +518,7 @@ static struct syscore_ops xap_syscore_ops = { ...@@ -518,7 +518,7 @@ static struct syscore_ops xap_syscore_ops = {
static int __init xen_acpi_processor_init(void) static int __init xen_acpi_processor_init(void)
{ {
unsigned int i; int i;
int rc; int rc;
if (!xen_initial_domain()) if (!xen_initial_domain())
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
# N.B. The below cannot be expressed with a single line using
# CONFIG_XEN_PCI_STUB as it always remains in "y" state,
# thus preventing the driver from being built as a module.
# Please note that CONFIG_XEN_PCIDEV_BACKEND and
# CONFIG_XEN_PCIDEV_STUB are mutually exclusive.
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
obj-$(CONFIG_XEN_PCIDEV_STUB) += xen-pciback.o
xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
xen-pciback-y += conf_space.o conf_space_header.o \ xen-pciback-y += conf_space.o conf_space_header.o \
......
...@@ -160,7 +160,7 @@ static void *pm_ctrl_init(struct pci_dev *dev, int offset) ...@@ -160,7 +160,7 @@ static void *pm_ctrl_init(struct pci_dev *dev, int offset)
} }
out: out:
return ERR_PTR(err); return err ? ERR_PTR(err) : NULL;
} }
static const struct config_field caplist_pm[] = { static const struct config_field caplist_pm[] = {
......
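The one-line fix above makes pm_ctrl_init() return NULL on success instead of pushing err == 0 through ERR_PTR(). A small sketch of the ERR_PTR()/IS_ERR() convention that such init hooks and their callers rely on; the function names below are invented for illustration.

#include <linux/err.h>

/* Illustrative init hook following the same convention as the fixed function. */
static void *example_field_init(int err, void *data)
{
	return err ? ERR_PTR(err) : data;	/* data (or NULL) on success */
}

static int example_caller(void)
{
	void *p = example_field_init(-ENODEV, NULL);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* propagate the negative errno */
	/* p is a valid (possibly NULL) pointer here */
	return 0;
}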
...@@ -236,8 +236,12 @@ static void *bar_init(struct pci_dev *dev, int offset) ...@@ -236,8 +236,12 @@ static void *bar_init(struct pci_dev *dev, int offset)
else { else {
pos = (offset - PCI_BASE_ADDRESS_0) / 4; pos = (offset - PCI_BASE_ADDRESS_0) / 4;
if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) { if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) {
bar->val = res[pos - 1].start >> 32; /*
bar->len_val = -resource_size(&res[pos - 1]) >> 32; * Use ">> 16 >> 16" instead of direct ">> 32" shift
* to avoid warnings on 32-bit architectures.
*/
bar->val = res[pos - 1].start >> 16 >> 16;
bar->len_val = -resource_size(&res[pos - 1]) >> 16 >> 16;
return bar; return bar;
} }
} }
......
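The new comment explains why the BAR code shifts in two 16-bit steps: resource_size_t may be only 32 bits wide on 32-bit builds, and shifting a 32-bit value by 32 is undefined behaviour (and triggers a compiler warning). A stand-alone illustration of the same idiom, using my own macro name rather than anything from the driver:

#include <linux/types.h>

/* ">> 16 >> 16" yields the high half whether the operand is 32 or 64 bits wide;
 * a direct ">> 32" would be undefined (and warned about) for a 32-bit operand.
 */
#define HIGH_HALF(x)	((u32)((x) >> 16 >> 16))

/* HIGH_HALF((u64)0x12345678abcdef00ULL) == 0x12345678 */
/* HIGH_HALF((u32)0xabcdef00U)           == 0x0        */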
...@@ -19,7 +19,8 @@ ...@@ -19,7 +19,8 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <xen/events.h> #include <xen/events.h>
#include <asm/xen/pci.h> #include <xen/pci.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h> #include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h> #include <xen/interface/physdev.h>
#include "pciback.h" #include "pciback.h"
......
...@@ -71,6 +71,11 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, ...@@ -71,6 +71,11 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev); struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev); void pcistub_put_pci_dev(struct pci_dev *dev);
static inline bool xen_pcibk_pv_support(void)
{
return IS_ENABLED(CONFIG_XEN_PCIDEV_BACKEND);
}
/* Ensure a device is turned off or reset */ /* Ensure a device is turned off or reset */
void xen_pcibk_reset_device(struct pci_dev *pdev); void xen_pcibk_reset_device(struct pci_dev *pdev);
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <xen/xenbus.h> #include <xen/xenbus.h>
#include <xen/events.h> #include <xen/events.h>
#include <asm/xen/pci.h> #include <xen/pci.h>
#include "pciback.h" #include "pciback.h"
#define INVALID_EVTCHN_IRQ (-1) #define INVALID_EVTCHN_IRQ (-1)
...@@ -743,6 +743,9 @@ const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend; ...@@ -743,6 +743,9 @@ const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
int __init xen_pcibk_xenbus_register(void) int __init xen_pcibk_xenbus_register(void)
{ {
if (!xen_pcibk_pv_support())
return 0;
xen_pcibk_backend = &xen_pcibk_vpci_backend; xen_pcibk_backend = &xen_pcibk_vpci_backend;
if (passthrough) if (passthrough)
xen_pcibk_backend = &xen_pcibk_passthrough_backend; xen_pcibk_backend = &xen_pcibk_passthrough_backend;
...@@ -752,5 +755,6 @@ int __init xen_pcibk_xenbus_register(void) ...@@ -752,5 +755,6 @@ int __init xen_pcibk_xenbus_register(void)
void __exit xen_pcibk_xenbus_unregister(void) void __exit xen_pcibk_xenbus_unregister(void)
{ {
xenbus_unregister_driver(&xen_pcibk_driver); if (xen_pcibk_pv_support())
xenbus_unregister_driver(&xen_pcibk_driver);
} }
...@@ -53,7 +53,6 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg); ...@@ -53,7 +53,6 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg);
int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
int HYPERVISOR_physdev_op(int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
int HYPERVISOR_tmem_op(void *arg);
int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs, int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs,
struct xen_dm_op_buf *bufs); struct xen_dm_op_buf *bufs);
...@@ -74,18 +73,4 @@ HYPERVISOR_suspend(unsigned long start_info_mfn) ...@@ -74,18 +73,4 @@ HYPERVISOR_suspend(unsigned long start_info_mfn)
return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
} }
static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
unsigned int new_val, unsigned long flags)
{
BUG();
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
int count, int *success_count, domid_t domid)
{
BUG();
}
#endif /* _ASM_ARM_XEN_HYPERCALL_H */ #endif /* _ASM_ARM_XEN_HYPERCALL_H */
...@@ -26,9 +26,6 @@ extern struct balloon_stats balloon_stats; ...@@ -26,9 +26,6 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target); void balloon_set_new_target(unsigned long target);
int alloc_xenballooned_pages(int nr_pages, struct page **pages);
void free_xenballooned_pages(int nr_pages, struct page **pages);
#ifdef CONFIG_XEN_BALLOON #ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void); void xen_balloon_init(void);
#else #else
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* callback.h * callback.h
* *
* Register guest OS callbacks with Xen. * Register guest OS callbacks with Xen.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Ian Campbell * Copyright (c) 2006, Ian Campbell
*/ */
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* elfnote.h * elfnote.h
* *
* Definitions used for the Xen ELF notes. * Definitions used for the Xen ELF notes.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Ian Campbell, XenSource Ltd. * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* event_channel.h * event_channel.h
* *
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* features.h * features.h
* *
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* grant_table.h * grant_table.h
* *
* Interface for granting foreign access to page frames, and receiving * Interface for granting foreign access to page frames, and receiving
* page-ownership transfers. * page-ownership transfers.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2004, K A Fraser * Copyright (c) 2004, K A Fraser
*/ */
......
/* SPDX-License-Identifier: MIT */
/* /*
* Copyright (c) 2016, Citrix Systems Inc * Copyright (c) 2016, Citrix Systems Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/ */
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__ #ifndef __XEN_PUBLIC_HVM_DM_OP_H__
......
/* /* SPDX-License-Identifier: MIT */
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__
......
/* SPDX-License-Identifier: MIT */
/* /*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com> * Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
*/ */
......
/* /* SPDX-License-Identifier: MIT */
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__
......
/* SPDX-License-Identifier: MIT */
/* /*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2016, Citrix Systems, Inc. * Copyright (c) 2016, Citrix Systems, Inc.
*/ */
......
/* SPDX-License-Identifier: MIT */
/* /*
* 9pfs.h -- Xen 9PFS transport * 9pfs.h -- Xen 9PFS transport
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com> * Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* blkif.h * blkif.h
* *
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* console.h * console.h
* *
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* displif.h * displif.h
* *
* Unified display device I/O interface for Xen guest OSes. * Unified display device I/O interface for Xen guest OSes.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2016-2017 EPAM Systems Inc. * Copyright (C) 2016-2017 EPAM Systems Inc.
* *
* Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> * Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
......
/* SPDX-License-Identifier: MIT */
/* /*
* fbif.h -- Xen virtual frame buffer device * fbif.h -- Xen virtual frame buffer device
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com> * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/ */
......
/* SPDX-License-Identifier: MIT */
/* /*
* kbdif.h -- Xen virtual keyboard/mouse * kbdif.h -- Xen virtual keyboard/mouse
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com> * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com> * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/ */
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* xen_netif.h * xen_netif.h
* *
* Unified network-device I/O interface for Xen guest OSes. * Unified network-device I/O interface for Xen guest OSes.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2003-2004, Keir Fraser * Copyright (c) 2003-2004, Keir Fraser
*/ */
......
/* SPDX-License-Identifier: MIT */
/* /*
* PCI Backend/Frontend Common Data Structures & Macros * PCI Backend/Frontend Common Data Structures & Macros
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil> * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/ */
#ifndef __XEN_PCI_COMMON_H__ #ifndef __XEN_PCI_COMMON_H__
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
#ifndef __XEN_PROTOCOLS_H__ #ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__
......
/* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__ #ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__ #define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* ring.h * ring.h
* *
* Shared producer-consumer ring macros. * Shared producer-consumer ring macros.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Tim Deegan and Andrew Warfield November 2004. * Tim Deegan and Andrew Warfield November 2004.
*/ */
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* sndif.h * sndif.h
* *
* Unified sound-device I/O interface for Xen guest OSes. * Unified sound-device I/O interface for Xen guest OSes.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2013-2015 GlobalLogic Inc. * Copyright (C) 2013-2015 GlobalLogic Inc.
* Copyright (C) 2016-2017 EPAM Systems Inc. * Copyright (C) 2016-2017 EPAM Systems Inc.
* *
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* vscsiif.h * vscsiif.h
* *
* Based on the blkif.h code. * Based on the blkif.h code.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright(c) FUJITSU Limited 2008. * Copyright(c) FUJITSU Limited 2008.
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/***************************************************************************** /*****************************************************************************
* xenbus.h * xenbus.h
* *
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/* /*
* Details of the "wire" protocol between Xen Store Daemon and client * Details of the "wire" protocol between Xen Store Daemon and client
* library or guest kernel. * library or guest kernel.
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* memory.h * memory.h
* *
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* nmi.h * nmi.h
* *
......
/* /* SPDX-License-Identifier: MIT */
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_PHYSDEV_H__ #ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* platform.h * platform.h
* *
* Hardware platform operations. Intended for use by domain-0 kernel. * Hardware platform operations. Intended for use by domain-0 kernel.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2002-2006, K Fraser * Copyright (c) 2002-2006, K Fraser
*/ */
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* sched.h * sched.h
* *
* Scheduler state interactions * Scheduler state interactions
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2005, Keir Fraser <keir@xensource.com> * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/ */
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* vcpu.h * vcpu.h
* *
* VCPU initialisation, query, and hotplug. * VCPU initialisation, query, and hotplug.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2005, Keir Fraser <keir@xensource.com> * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* version.h * version.h
* *
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* arch-x86/mca.h * arch-x86/mca.h
* Guest OS machine check interface to x86 Xen. * Guest OS machine check interface to x86 Xen.
......
/* SPDX-License-Identifier: MIT */
/****************************************************************************** /******************************************************************************
* xen.h * xen.h
* *
* Guest OS interface to Xen. * Guest OS interface to Xen.
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2004, K A Fraser * Copyright (c) 2004, K A Fraser
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: MIT */
#ifndef __XEN_PUBLIC_XENPMU_H__ #ifndef __XEN_PUBLIC_XENPMU_H__
#define __XEN_PUBLIC_XENPMU_H__ #define __XEN_PUBLIC_XENPMU_H__
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __XEN_PCI_H__
#define __XEN_PCI_H__
#if defined(CONFIG_XEN_DOM0)
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
}
static inline int xen_register_device_domain_owner(struct pci_dev *dev,
uint16_t domain)
{
return -1;
}
static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
return -1;
}
#endif
#endif
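The new <xen/pci.h> header gives non-Dom0 configurations stub versions that simply return -1, so callers can be written without #ifdef CONFIG_XEN_DOM0. A hedged example of such a caller; the function name is made up, only the lookup helper comes from the header above.

#include <linux/pci.h>
#include <linux/types.h>
#include <xen/pci.h>

/* Illustrative caller: works with or without CONFIG_XEN_DOM0, since the stubs
 * report "no owner" through a negative return value.
 */
static bool example_device_is_assigned(struct pci_dev *dev)
{
	return xen_find_device_domain_owner(dev) >= 0;
}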
...@@ -52,13 +52,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, ...@@ -52,13 +52,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size; extern u64 xen_saved_max_mem_size;
#endif #endif
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
#else
#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
#define xen_free_unpopulated_pages free_xenballooned_pages
#include <xen/balloon.h>
#endif
#endif /* _XEN_XEN_H */ #endif /* _XEN_XEN_H */
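With the fallback #defines removed, xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages() are the single interface for obtaining pages to back foreign mappings, whatever CONFIG_XEN_UNPOPULATED_ALLOC is set to. A hedged usage sketch, with error handling trimmed and a hypothetical caller name:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/xen.h>

/* Grab four unpopulated pages suitable for mapping foreign memory, then
 * return them. Illustrative only.
 */
static int example_use_unpopulated(void)
{
	struct page *pages[4];
	int ret;

	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (ret)
		return ret;

	/* ... map grants or foreign frames into these pages ... */

	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
	return 0;
}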