Commit b1c3497e authored by Jane Malalane, committed by Juergen Gross

x86/xen: Add support for HVMOP_set_evtchn_upcall_vector

Implement support for the HVMOP_set_evtchn_upcall_vector hypercall in
order to set the per-vCPU event channel vector callback on Linux and
use it in preference to HVM_PARAM_CALLBACK_IRQ.

If the per-vCPU vector setup succeeds on the BSP, use this method for
the APs as well. If not, fall back to the global vector-type callback.

Also register callback_irq at per-vCPU event channel setup to trick the
toolstack into thinking the domain is enlightened.
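
For illustration, a minimal sketch of registering the per-vCPU vector via
this hypercall (the helper name is hypothetical; the struct, hypercall
number, and fields are taken from the diff below):

	/* Register HYPERVISOR_CALLBACK_VECTOR as the upcall vector for one
	 * vCPU. A nonzero return (e.g. -ENOSYS on a hypervisor without this
	 * op) means the caller must fall back to the global callback. */
	static int register_percpu_upcall(unsigned int cpu)
	{
		xen_hvm_evtchn_upcall_vector_t op = {
			.vector = HYPERVISOR_CALLBACK_VECTOR,
			.vcpu = per_cpu(xen_vcpu_id, cpu),
		};

		return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
	}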
Suggested-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Jane Malalane <jane.malalane@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20220729070416.23306-1-jane.malalane@citrix.com
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 251e90e7
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -107,6 +107,8 @@
  * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768.
  */
 #define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
+/* Per-vCPU event channel upcalls */
+#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)
 
 /*
  * Leaf 6 (0x40000x05)
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -23,7 +23,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 /* No need for a barrier -- XCHG is a barrier on x86. */
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
-extern int xen_have_vector_callback;
+extern bool xen_have_vector_callback;
 
 /*
  * Events delivered via platform PCI interrupts are always
@@ -34,4 +34,5 @@ static inline bool xen_support_evtchn_rebind(void)
 	return (!xen_hvm_domain() || xen_have_vector_callback);
 }
 
+extern bool xen_percpu_upcall;
 #endif /* _ASM_X86_XEN_EVENTS_H */
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(xen_start_info);
 
 struct shared_info xen_dummy_shared_info;
 
-__read_mostly int xen_have_vector_callback;
+__read_mostly bool xen_have_vector_callback = true;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 /*
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -8,6 +8,8 @@
 #include <xen/features.h>
 #include <xen/events.h>
+#include <xen/hvm.h>
+#include <xen/interface/hvm/hvm_op.h>
 #include <xen/interface/memory.h>
 
 #include <asm/apic.h>
@@ -31,6 +33,9 @@
 
 static unsigned long shared_info_pfn;
 
+__ro_after_init bool xen_percpu_upcall;
+EXPORT_SYMBOL_GPL(xen_percpu_upcall);
+
 void xen_hvm_init_shared_info(void)
 {
 	struct xen_add_to_physmap xatp;
@@ -126,6 +131,9 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	if (xen_percpu_upcall)
+		ack_APIC_irq();
+
 	inc_irq_stat(irq_hv_callback_count);
 
 	xen_hvm_evtchn_do_upcall();
@@ -169,6 +177,15 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
 	if (!xen_have_vector_callback)
 		return 0;
 
+	if (xen_percpu_upcall) {
+		rc = xen_set_upcall_vector(cpu);
+		if (rc) {
+			WARN(1, "HVMOP_set_evtchn_upcall_vector"
+			     " for CPU %d failed: %d\n", cpu, rc);
+			return rc;
+		}
+	}
+
 	if (xen_feature(XENFEAT_hvm_safe_pvclock))
 		xen_setup_timer(cpu);
 
@@ -189,8 +206,6 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
 	return 0;
 }
 
-static bool no_vector_callback __initdata;
-
 static void __init xen_hvm_guest_init(void)
 {
 	if (xen_pv_domain())
@@ -213,9 +228,6 @@ static void __init xen_hvm_guest_init(void)
 
 	xen_panic_handler_init();
 
-	if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
-		xen_have_vector_callback = 1;
-
 	xen_hvm_smp_init();
 	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
 	xen_unplug_emulated_devices();
@@ -241,7 +253,7 @@ early_param("xen_nopv", xen_parse_nopv);
 
 static __init int xen_parse_no_vector_callback(char *arg)
 {
-	no_vector_callback = true;
+	xen_have_vector_callback = false;
 	return 0;
 }
 early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -5,6 +5,7 @@
 #include <xen/hvm.h>
 #include <xen/features.h>
 #include <xen/interface/features.h>
+#include <xen/events.h>
 
 #include "xen-ops.h"
@@ -14,6 +15,13 @@ void xen_hvm_post_suspend(int suspend_cancelled)
 		xen_hvm_init_shared_info();
 		xen_vcpu_restore();
 	}
-	xen_setup_callback_vector();
+	if (xen_percpu_upcall) {
+		unsigned int cpu;
+
+		for_each_online_cpu(cpu)
+			BUG_ON(xen_set_upcall_vector(cpu));
+	} else {
+		xen_setup_callback_vector();
+	}
 	xen_unplug_emulated_devices();
 }
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
 #include <asm/xen/pci.h>
 #endif
 #include <asm/sync_bitops.h>
@@ -2183,6 +2184,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
 	.irq_ack = ack_dynirq,
 };
 
+#ifdef CONFIG_X86
 #ifdef CONFIG_XEN_PVHVM
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
@@ -2195,11 +2197,48 @@ void xen_setup_callback_vector(void)
 		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
 		if (xen_set_callback_via(callback_via)) {
 			pr_err("Request for Xen HVM callback vector failed\n");
-			xen_have_vector_callback = 0;
+			xen_have_vector_callback = false;
 		}
 	}
 }
 
+/*
+ * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fallback to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+	if (!xen_have_vector_callback)
+		return;
+
+	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+	    !xen_set_upcall_vector(0))
+		xen_percpu_upcall = true;
+	else if (xen_feature(XENFEAT_hvm_callback_vector))
+		xen_setup_callback_vector();
+	else
+		xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+	int rc;
+	xen_hvm_evtchn_upcall_vector_t op = {
+		.vector = HYPERVISOR_CALLBACK_VECTOR,
+		.vcpu = per_cpu(xen_vcpu_id, cpu),
+	};
+
+	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+	if (rc)
+		return rc;
+
+	/* Trick toolstack to think we are enlightened. */
+	if (!cpu)
+		rc = xen_set_callback_via(1);
+
+	return rc;
+}
+
 static __init void xen_alloc_callback_vector(void)
 {
 	if (!xen_have_vector_callback)
@@ -2210,8 +2249,11 @@ static __init void xen_alloc_callback_vector(void)
 }
 #else
 void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) {}
 static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
 
 bool xen_fifo_events = true;
 module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2271,10 +2313,9 @@ void __init xen_init_IRQ(void)
 		if (xen_initial_domain())
 			pci_xen_initial_domain();
 	}
-	if (xen_feature(XENFEAT_hvm_callback_vector)) {
-		xen_setup_callback_vector();
-		xen_alloc_callback_vector();
-	}
+
+	xen_init_setup_upcall_vector();
+	xen_alloc_callback_vector();
 
 	if (xen_hvm_domain()) {
 		native_init_IRQ();
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -60,4 +60,6 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
 
 void xen_setup_callback_vector(void);
+int xen_set_upcall_vector(unsigned int cpu);
+
 #endif /* XEN_HVM_H__ */
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -46,4 +46,23 @@ struct xen_hvm_get_mem_type {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type);
 
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ *                                 channel upcalls on the specified <vcpu>. If set,
+ *                                 this vector will be used in preference to the
+ *                                 domain global callback via (see
+ *                                 HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+	uint32_t vcpu;
+	uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_evtchn_upcall_vector_t);
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */