Commit e95e6f17 authored by David Vrabel, committed by Ingo Molnar

locking/pvqspinlock, x86: Enable PV qspinlock for Xen

This patch adds the Xen-specific code that implements the CPU
halting and kicking operations needed by the queued spinlock PV
code.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-12-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bf0c7c34
@@ -17,6 +17,56 @@
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+        int irq = __this_cpu_read(lock_kicker_irq);
+
+        /* If kicker interrupts not initialized yet, just spin */
+        if (irq == -1)
+                return;
+
+        /* clear pending */
+        xen_clear_irq_pending(irq);
+        barrier();
+
+        /*
+         * We check the byte value after clearing pending IRQ to make sure
+         * that we won't miss a wakeup event because of the clearing.
+         *
+         * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+         * So it is effectively a memory barrier for x86.
+         */
+        if (READ_ONCE(*byte) != val)
+                return;
+
+        /*
+         * If an interrupt happens here, it will leave the wakeup irq
+         * pending, which will cause xen_poll_irq() to return
+         * immediately.
+         */
+
+        /* Block until irq becomes pending (or perhaps a spurious wakeup) */
+        xen_poll_irq(irq);
+}
+
+#else /* CONFIG_QUEUED_SPINLOCK */
+
 enum xen_contention_stat {
         TAKEN_SLOW,
         TAKEN_SLOW_PICKUP,
@@ -100,12 +150,9 @@ struct xen_lock_waiting {
         __ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
         int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                 }
         }
 }
+#endif /* CONFIG_QUEUED_SPINLOCK */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -280,8 +328,16 @@ void __init xen_init_spinlocks(void)
                 return;
         }
         printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUED_SPINLOCK
+        __pv_init_lock_hash();
+        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+        pv_lock_ops.wait = xen_qlock_wait;
+        pv_lock_ops.kick = xen_qlock_kick;
+#else
         pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
         pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -310,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#ifdef CONFIG_XEN_DEBUG_FS
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
 static struct dentry *d_spin_debug;
...
@@ -240,7 +240,7 @@ config ARCH_USE_QUEUED_SPINLOCK
 config QUEUED_SPINLOCK
         def_bool y if ARCH_USE_QUEUED_SPINLOCK
-        depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
+        depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK
         bool
...
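
For reference, the indirection that makes the pv_lock_ops.wait/kick
assignments above reachable from the generic slow path is added
elsewhere in this series (in arch/x86/include/asm/paravirt.h);
roughly, and quoted from memory rather than from this patch:

    /* As added by a companion patch in this series (quoted from memory). */
    static __always_inline void pv_wait(u8 *ptr, u8 val)
    {
            PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
    }

    static __always_inline void pv_kick(int cpu)
    {
            PVOP_VCALL1(pv_lock_ops.kick, cpu);
    }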