Commit b5d72dda authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus-5.3a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "Fixes and features:

   - A series to introduce a common command line parameter for disabling
     paravirtual extensions when running as a guest in a virtualized
     environment

   - A fix for int3 handling in Xen pv guests

   - Removal of the Xen-specific tmem driver as support of tmem in Xen
     has been dropped (and it was experimental only)

   - A security fix for running as Xen dom0 (XSA-300)

   - A fix for IRQ handling when offlining cpus in Xen guests

   - Some small cleanups"

* tag 'for-linus-5.3a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: let alloc_xenballooned_pages() fail if not enough memory free
  xen/pv: Fix a boot up hang revealed by int3 self test
  x86/xen: Add "nopv" support for HVM guest
  x86/paravirt: Remove const mark from x86_hyper_xen_hvm variable
  xen: Map "xen_nopv" parameter to "nopv" and mark it obsolete
  x86: Add "nopv" parameter to disable PV extensions
  x86/xen: Mark xen_hvm_need_lapic() and xen_x2apic_para_available() as __init
  xen: remove tmem driver
  Revert "x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized"
  xen/events: fix binding user event channels to cpus
parents 26473f83 a1078e82
...@@ -4698,27 +4698,6 @@ ...@@ -4698,27 +4698,6 @@
Force threading of all interrupt handlers except those Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD. marked explicitly IRQF_NO_THREAD.
tmem [KNL,XEN]
Enable the Transcendent memory driver if built-in.
tmem.cleancache=0|1 [KNL, XEN]
Default is on (1). Disable the usage of the cleancache
API to send anonymous pages to the hypervisor.
tmem.frontswap=0|1 [KNL, XEN]
Default is on (1). Disable the usage of the frontswap
API to send swap pages to the hypervisor. If disabled
the selfballooning and selfshrinking are force disabled.
tmem.selfballooning=0|1 [KNL, XEN]
Default is on (1). Disable the driving of swap pages
to the hypervisor.
tmem.selfshrinking=0|1 [KNL, XEN]
Default is on (1). Partial swapoff that immediately
transfers pages from Xen hypervisor back to the
kernel based on different criteria.
topology= [S390] topology= [S390]
Format: {off | on} Format: {off | on}
Specify if the kernel should make use of the cpu Specify if the kernel should make use of the cpu
...@@ -5288,6 +5267,8 @@ ...@@ -5288,6 +5267,8 @@
xen_nopv [X86] xen_nopv [X86]
Disables the PV optimizations forcing the HVM guest to Disables the PV optimizations forcing the HVM guest to
run as generic HVM guest with no PV drivers. run as generic HVM guest with no PV drivers.
This option is obsoleted by the "nopv" option, which
has equivalent effect for XEN platform.
xen_scrub_pages= [XEN] xen_scrub_pages= [XEN]
Boolean option to control scrubbing pages before giving them back Boolean option to control scrubbing pages before giving them back
...@@ -5302,6 +5283,11 @@ ...@@ -5302,6 +5283,11 @@
improve timer resolution at the expense of processing improve timer resolution at the expense of processing
more timer interrupts. more timer interrupts.
nopv= [X86,XEN,KVM,HYPER_V,VMWARE]
Disables the PV optimizations forcing the guest to run
as generic guest with no PV drivers. Currently support
XEN HVM, KVM, HYPER_V and VMWARE guest.
xirc2ps_cs= [NET,PCMCIA] xirc2ps_cs= [NET,PCMCIA]
Format: Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
......
...@@ -1176,7 +1176,6 @@ idtentry stack_segment do_stack_segment has_error_code=1 ...@@ -1176,7 +1176,6 @@ idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
idtentry xennmi do_nmi has_error_code=0 idtentry xennmi do_nmi has_error_code=0
idtentry xendebug do_debug has_error_code=0 idtentry xendebug do_debug has_error_code=0
idtentry xenint3 do_int3 has_error_code=0
#endif #endif
idtentry general_protection do_general_protection has_error_code=1 idtentry general_protection do_general_protection has_error_code=1
......
...@@ -53,8 +53,20 @@ struct hypervisor_x86 { ...@@ -53,8 +53,20 @@ struct hypervisor_x86 {
/* runtime callbacks */ /* runtime callbacks */
struct x86_hyper_runtime runtime; struct x86_hyper_runtime runtime;
/* ignore nopv parameter */
bool ignore_nopv;
}; };
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_pv;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern const struct hypervisor_x86 x86_hyper_jailhouse;
extern const struct hypervisor_x86 x86_hyper_acrn;
extern struct hypervisor_x86 x86_hyper_xen_hvm;
extern bool nopv;
extern enum x86_hypervisor_type x86_hyper_type; extern enum x86_hypervisor_type x86_hyper_type;
extern void init_hypervisor_platform(void); extern void init_hypervisor_platform(void);
static inline bool hypervisor_is_type(enum x86_hypervisor_type type) static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
......
...@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void); ...@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
asmlinkage void xen_divide_error(void); asmlinkage void xen_divide_error(void);
asmlinkage void xen_xennmi(void); asmlinkage void xen_xennmi(void);
asmlinkage void xen_xendebug(void); asmlinkage void xen_xendebug(void);
asmlinkage void xen_xenint3(void); asmlinkage void xen_int3(void);
asmlinkage void xen_overflow(void); asmlinkage void xen_overflow(void);
asmlinkage void xen_bounds(void); asmlinkage void xen_bounds(void);
asmlinkage void xen_invalid_op(void); asmlinkage void xen_invalid_op(void);
......
...@@ -301,6 +301,8 @@ extern struct x86_apic_ops x86_apic_ops; ...@@ -301,6 +301,8 @@ extern struct x86_apic_ops x86_apic_ops;
extern void x86_early_init_platform_quirks(void); extern void x86_early_init_platform_quirks(void);
extern void x86_init_noop(void); extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused); extern void x86_init_uint_noop(unsigned int unused);
extern bool bool_x86_init_noop(void);
extern void x86_op_int_noop(int cpu);
extern bool x86_pnpbios_disabled(void); extern bool x86_pnpbios_disabled(void);
#endif #endif
...@@ -44,14 +44,14 @@ static inline uint32_t xen_cpuid_base(void) ...@@ -44,14 +44,14 @@ static inline uint32_t xen_cpuid_base(void)
} }
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
extern bool xen_hvm_need_lapic(void); extern bool __init xen_hvm_need_lapic(void);
static inline bool xen_x2apic_para_available(void) static inline bool __init xen_x2apic_para_available(void)
{ {
return xen_hvm_need_lapic(); return xen_hvm_need_lapic();
} }
#else #else
static inline bool xen_x2apic_para_available(void) static inline bool __init xen_x2apic_para_available(void)
{ {
return (xen_cpuid_base() != 0); return (xen_cpuid_base() != 0);
} }
......
...@@ -26,14 +26,6 @@ ...@@ -26,14 +26,6 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_pv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern const struct hypervisor_x86 x86_hyper_jailhouse;
extern const struct hypervisor_x86 x86_hyper_acrn;
static const __initconst struct hypervisor_x86 * const hypervisors[] = static const __initconst struct hypervisor_x86 * const hypervisors[] =
{ {
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
...@@ -58,6 +50,14 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = ...@@ -58,6 +50,14 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
enum x86_hypervisor_type x86_hyper_type; enum x86_hypervisor_type x86_hyper_type;
EXPORT_SYMBOL(x86_hyper_type); EXPORT_SYMBOL(x86_hyper_type);
bool __initdata nopv;
static __init int parse_nopv(char *arg)
{
nopv = true;
return 0;
}
early_param("nopv", parse_nopv);
static inline const struct hypervisor_x86 * __init static inline const struct hypervisor_x86 * __init
detect_hypervisor_vendor(void) detect_hypervisor_vendor(void)
{ {
...@@ -65,6 +65,9 @@ detect_hypervisor_vendor(void) ...@@ -65,6 +65,9 @@ detect_hypervisor_vendor(void)
uint32_t pri, max_pri = 0; uint32_t pri, max_pri = 0;
for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
if (unlikely(nopv) && !(*p)->ignore_nopv)
continue;
pri = (*p)->detect(); pri = (*p)->detect();
if (pri > max_pri) { if (pri > max_pri) {
max_pri = pri; max_pri = pri;
......
...@@ -217,4 +217,5 @@ const struct hypervisor_x86 x86_hyper_jailhouse __refconst = { ...@@ -217,4 +217,5 @@ const struct hypervisor_x86 x86_hyper_jailhouse __refconst = {
.detect = jailhouse_detect, .detect = jailhouse_detect,
.init.init_platform = jailhouse_init_platform, .init.init_platform = jailhouse_init_platform,
.init.x2apic_available = jailhouse_x2apic_available, .init.x2apic_available = jailhouse_x2apic_available,
.ignore_nopv = true,
}; };
...@@ -1368,8 +1368,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) ...@@ -1368,8 +1368,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
pr_info("CPU0: "); pr_info("CPU0: ");
print_cpu_info(&cpu_data(0)); print_cpu_info(&cpu_data(0));
native_pv_lock_init();
uv_system_init(); uv_system_init();
set_mtrr_aps_delayed_init(); set_mtrr_aps_delayed_init();
...@@ -1399,6 +1397,7 @@ void __init native_smp_prepare_boot_cpu(void) ...@@ -1399,6 +1397,7 @@ void __init native_smp_prepare_boot_cpu(void)
/* already set me in cpu_online_mask in boot_cpu_init() */ /* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask); cpumask_set_cpu(me, cpu_callout_mask);
cpu_set_state_online(me); cpu_set_state_online(me);
native_pv_lock_init();
} }
void __init calculate_max_logical_packages(void) void __init calculate_max_logical_packages(void)
......
...@@ -29,8 +29,8 @@ void x86_init_noop(void) { } ...@@ -29,8 +29,8 @@ void x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { } void __init x86_init_uint_noop(unsigned int unused) { }
static int __init iommu_init_noop(void) { return 0; } static int __init iommu_init_noop(void) { return 0; }
static void iommu_shutdown_noop(void) { } static void iommu_shutdown_noop(void) { }
static bool __init bool_x86_init_noop(void) { return false; } bool __init bool_x86_init_noop(void) { return false; }
static void x86_op_int_noop(int cpu) { } void x86_op_int_noop(int cpu) { }
/* /*
* The platform setup functions are preset with the default functions * The platform setup functions are preset with the default functions
......
...@@ -210,18 +210,18 @@ static void __init xen_hvm_guest_init(void) ...@@ -210,18 +210,18 @@ static void __init xen_hvm_guest_init(void)
#endif #endif
} }
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg) static __init int xen_parse_nopv(char *arg)
{ {
xen_nopv = true; pr_notice("\"xen_nopv\" is deprecated, please use \"nopv\" instead\n");
if (xen_cpuid_base())
nopv = true;
return 0; return 0;
} }
early_param("xen_nopv", xen_parse_nopv); early_param("xen_nopv", xen_parse_nopv);
bool xen_hvm_need_lapic(void) bool __init xen_hvm_need_lapic(void)
{ {
if (xen_nopv)
return false;
if (xen_pv_domain()) if (xen_pv_domain())
return false; return false;
if (!xen_hvm_domain()) if (!xen_hvm_domain())
...@@ -230,15 +230,6 @@ bool xen_hvm_need_lapic(void) ...@@ -230,15 +230,6 @@ bool xen_hvm_need_lapic(void)
return false; return false;
return true; return true;
} }
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
static uint32_t __init xen_platform_hvm(void)
{
if (xen_pv_domain() || xen_nopv)
return 0;
return xen_cpuid_base();
}
static __init void xen_hvm_guest_late_init(void) static __init void xen_hvm_guest_late_init(void)
{ {
...@@ -251,6 +242,9 @@ static __init void xen_hvm_guest_late_init(void) ...@@ -251,6 +242,9 @@ static __init void xen_hvm_guest_late_init(void)
/* PVH detected. */ /* PVH detected. */
xen_pvh = true; xen_pvh = true;
if (nopv)
panic("\"nopv\" and \"xen_nopv\" parameters are unsupported in PVH guest.");
/* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */ /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC) if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
...@@ -260,7 +254,38 @@ static __init void xen_hvm_guest_late_init(void) ...@@ -260,7 +254,38 @@ static __init void xen_hvm_guest_late_init(void)
#endif #endif
} }
const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { static uint32_t __init xen_platform_hvm(void)
{
uint32_t xen_domain = xen_cpuid_base();
struct x86_hyper_init *h = &x86_hyper_xen_hvm.init;
if (xen_pv_domain())
return 0;
if (xen_pvh_domain() && nopv) {
/* Guest booting via the Xen-PVH boot entry goes here */
pr_info("\"nopv\" parameter is ignored in PVH guest\n");
nopv = false;
} else if (nopv && xen_domain) {
/*
* Guest booting via normal boot entry (like via grub2) goes
* here.
*
* Use interface functions for bare hardware if nopv,
* xen_hvm_guest_late_init is an exception as we need to
* detect PVH and panic there.
*/
h->init_platform = x86_init_noop;
h->x2apic_available = bool_x86_init_noop;
h->init_mem_mapping = x86_init_noop;
h->init_after_bootmem = x86_init_noop;
h->guest_late_init = xen_hvm_guest_late_init;
x86_hyper_xen_hvm.runtime.pin_vcpu = x86_op_int_noop;
}
return xen_domain;
}
struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
.name = "Xen HVM", .name = "Xen HVM",
.detect = xen_platform_hvm, .detect = xen_platform_hvm,
.type = X86_HYPER_XEN_HVM, .type = X86_HYPER_XEN_HVM,
...@@ -269,4 +294,5 @@ const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { ...@@ -269,4 +294,5 @@ const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
.init.init_mem_mapping = xen_hvm_init_mem_mapping, .init.init_mem_mapping = xen_hvm_init_mem_mapping,
.init.guest_late_init = xen_hvm_guest_late_init, .init.guest_late_init = xen_hvm_guest_late_init,
.runtime.pin_vcpu = xen_pin_vcpu, .runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
}; };
...@@ -596,12 +596,12 @@ struct trap_array_entry { ...@@ -596,12 +596,12 @@ struct trap_array_entry {
static struct trap_array_entry trap_array[] = { static struct trap_array_entry trap_array[] = {
{ debug, xen_xendebug, true }, { debug, xen_xendebug, true },
{ int3, xen_xenint3, true },
{ double_fault, xen_double_fault, true }, { double_fault, xen_double_fault, true },
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
{ machine_check, xen_machine_check, true }, { machine_check, xen_machine_check, true },
#endif #endif
{ nmi, xen_xennmi, true }, { nmi, xen_xennmi, true },
{ int3, xen_int3, false },
{ overflow, xen_overflow, false }, { overflow, xen_overflow, false },
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false }, { entry_INT80_compat, xen_entry_INT80_compat, false },
...@@ -1463,4 +1463,5 @@ const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { ...@@ -1463,4 +1463,5 @@ const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
.detect = xen_platform_pv, .detect = xen_platform_pv,
.type = X86_HYPER_XEN_PV, .type = X86_HYPER_XEN_PV,
.runtime.pin_vcpu = xen_pin_vcpu, .runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
}; };
...@@ -68,11 +68,8 @@ void xen_init_lock_cpu(int cpu) ...@@ -68,11 +68,8 @@ void xen_init_lock_cpu(int cpu)
int irq; int irq;
char *name; char *name;
if (!xen_pvspin) { if (!xen_pvspin)
if (cpu == 0)
static_branch_disable(&virt_spin_lock_key);
return; return;
}
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu)); cpu, per_cpu(lock_kicker_irq, cpu));
...@@ -124,6 +121,7 @@ void __init xen_init_spinlocks(void) ...@@ -124,6 +121,7 @@ void __init xen_init_spinlocks(void)
if (!xen_pvspin) { if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
static_branch_disable(&virt_spin_lock_key);
return; return;
} }
printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
......
...@@ -32,7 +32,6 @@ xen_pv_trap divide_error ...@@ -32,7 +32,6 @@ xen_pv_trap divide_error
xen_pv_trap debug xen_pv_trap debug
xen_pv_trap xendebug xen_pv_trap xendebug
xen_pv_trap int3 xen_pv_trap int3
xen_pv_trap xenint3
xen_pv_trap xennmi xen_pv_trap xennmi
xen_pv_trap overflow xen_pv_trap overflow
xen_pv_trap bounds xen_pv_trap bounds
......
...@@ -10,21 +10,6 @@ config XEN_BALLOON ...@@ -10,21 +10,6 @@ config XEN_BALLOON
the system to expand the domain's memory allocation, or alternatively the system to expand the domain's memory allocation, or alternatively
return unneeded memory to the system. return unneeded memory to the system.
config XEN_SELFBALLOONING
bool "Dynamically self-balloon kernel memory to target"
depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
help
Self-ballooning dynamically balloons available kernel memory driven
by the current usage of anonymous memory ("committed AS") and
controlled by various sysfs-settable parameters. Configuring
FRONTSWAP is highly recommended; if it is not configured, self-
ballooning is disabled by default. If FRONTSWAP is configured,
frontswap-selfshrinking is enabled by default but can be disabled
with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
is enabled by default but can be disabled with the 'tmem.selfballooning=0'
kernel boot parameter. Note that systems without a sufficiently
large swap device should not enable self-ballooning.
config XEN_BALLOON_MEMORY_HOTPLUG config XEN_BALLOON_MEMORY_HOTPLUG
bool "Memory hotplug support for Xen balloon driver" bool "Memory hotplug support for Xen balloon driver"
depends on XEN_BALLOON && MEMORY_HOTPLUG depends on XEN_BALLOON && MEMORY_HOTPLUG
...@@ -191,14 +176,6 @@ config SWIOTLB_XEN ...@@ -191,14 +176,6 @@ config SWIOTLB_XEN
def_bool y def_bool y
select SWIOTLB select SWIOTLB
config XEN_TMEM
tristate
depends on !ARM && !ARM64
default m if (CLEANCACHE || FRONTSWAP)
help
Shim to interface in-kernel Transcendent Memory hooks
(e.g. cleancache and frontswap) to Xen tmem hypercalls.
config XEN_PCIDEV_BACKEND config XEN_PCIDEV_BACKEND
tristate "Xen PCI-device backend driver" tristate "Xen PCI-device backend driver"
depends on PCI && X86 && XEN depends on PCI && X86 && XEN
......
...@@ -17,14 +17,12 @@ dom0-$(CONFIG_X86) += pcpu.o ...@@ -17,14 +17,12 @@ dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y) obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
......
...@@ -535,8 +535,15 @@ static void balloon_process(struct work_struct *work) ...@@ -535,8 +535,15 @@ static void balloon_process(struct work_struct *work)
state = reserve_additional_memory(); state = reserve_additional_memory();
} }
if (credit < 0) if (credit < 0) {
state = decrease_reservation(-credit, GFP_BALLOON); long n_pages;
n_pages = min(-credit, si_mem_available());
state = decrease_reservation(n_pages, GFP_BALLOON);
if (state == BP_DONE && n_pages != -credit &&
n_pages < totalreserve_pages)
state = BP_EAGAIN;
}
state = update_schedule(state); state = update_schedule(state);
...@@ -575,6 +582,9 @@ static int add_ballooned_pages(int nr_pages) ...@@ -575,6 +582,9 @@ static int add_ballooned_pages(int nr_pages)
} }
} }
if (si_mem_available() < nr_pages)
return -ENOMEM;
st = decrease_reservation(nr_pages, GFP_USER); st = decrease_reservation(nr_pages, GFP_USER);
if (st != BP_DONE) if (st != BP_DONE)
return -ENOMEM; return -ENOMEM;
...@@ -707,7 +717,7 @@ static int __init balloon_init(void) ...@@ -707,7 +717,7 @@ static int __init balloon_init(void)
balloon_stats.schedule_delay = 1; balloon_stats.schedule_delay = 1;
balloon_stats.max_schedule_delay = 32; balloon_stats.max_schedule_delay = 32;
balloon_stats.retry_count = 1; balloon_stats.retry_count = 1;
balloon_stats.max_retry_count = RETRY_UNLIMITED; balloon_stats.max_retry_count = 4;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
set_online_page_callback(&xen_online_page); set_online_page_callback(&xen_online_page);
......
...@@ -1294,7 +1294,7 @@ void rebind_evtchn_irq(int evtchn, int irq) ...@@ -1294,7 +1294,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
} }
/* Rebind an evtchn so that it gets delivered to a specific cpu */ /* Rebind an evtchn so that it gets delivered to a specific cpu */
int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu) static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
{ {
struct evtchn_bind_vcpu bind_vcpu; struct evtchn_bind_vcpu bind_vcpu;
int masked; int masked;
...@@ -1328,7 +1328,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu) ...@@ -1328,7 +1328,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force) bool force)
...@@ -1342,6 +1341,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, ...@@ -1342,6 +1341,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
return ret; return ret;
} }
/* To be called with desc->lock held. */
int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
return set_affinity_irq(d, cpumask_of(tcpu), false);
}
EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
static void enable_dynirq(struct irq_data *data) static void enable_dynirq(struct irq_data *data)
{ {
int evtchn = evtchn_from_irq(data->irq); int evtchn = evtchn_from_irq(data->irq);
......
...@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn) ...@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
this_cpu_write(bind_last_selected_cpu, selected_cpu); this_cpu_write(bind_last_selected_cpu, selected_cpu);
/* unmask expects irqs to be disabled */ /* unmask expects irqs to be disabled */
xen_rebind_evtchn_to_cpu(evtchn, selected_cpu); xen_set_affinity_evtchn(desc, selected_cpu);
raw_spin_unlock_irqrestore(&desc->lock, flags); raw_spin_unlock_irqrestore(&desc->lock, flags);
} }
......
This diff is collapsed.
...@@ -129,8 +129,6 @@ void xen_balloon_init(void) ...@@ -129,8 +129,6 @@ void xen_balloon_init(void)
{ {
register_balloon(&balloon_dev); register_balloon(&balloon_dev);
register_xen_selfballooning(&balloon_dev);
register_xenstore_notifier(&xenstore_notifier); register_xenstore_notifier(&xenstore_notifier);
} }
EXPORT_SYMBOL_GPL(xen_balloon_init); EXPORT_SYMBOL_GPL(xen_balloon_init);
......
This diff is collapsed.
...@@ -27,16 +27,6 @@ void balloon_set_new_target(unsigned long target); ...@@ -27,16 +27,6 @@ void balloon_set_new_target(unsigned long target);
int alloc_xenballooned_pages(int nr_pages, struct page **pages); int alloc_xenballooned_pages(int nr_pages, struct page **pages);
void free_xenballooned_pages(int nr_pages, struct page **pages); void free_xenballooned_pages(int nr_pages, struct page **pages);
struct device;
#ifdef CONFIG_XEN_SELFBALLOONING
extern int register_xen_selfballooning(struct device *dev);
#else
static inline int register_xen_selfballooning(struct device *dev)
{
return -ENOSYS;
}
#endif
#ifdef CONFIG_XEN_BALLOON #ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void); void xen_balloon_init(void);
#else #else
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#define _XEN_EVENTS_H #define _XEN_EVENTS_H
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
#include <linux/msi.h> #include <linux/msi.h>
#endif #endif
...@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn); ...@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq); void rebind_evtchn_irq(int evtchn, int irq);
int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu); int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(int port) static inline void notify_remote_via_evtchn(int port)
{ {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XEN_TMEM_H
#define _XEN_TMEM_H
#include <linux/types.h>
#ifdef CONFIG_XEN_TMEM_MODULE
#define tmem_enabled true
#else
/* defined in drivers/xen/tmem.c */
extern bool tmem_enabled;
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
extern int xen_selfballoon_init(bool, bool);
#endif
#endif /* _XEN_TMEM_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment