Commit c3f31f6a authored by Konrad Rzeszutek Wilk


Merge branch 'x86/spinlocks' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into stable/for-linus-3.12

* 'x86/spinlocks' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm/guest: Fix sparse warning: "symbol 'klock_waiting' was not declared as static"
  kvm: Paravirtual ticketlocks support for linux guests running on KVM hypervisor
  kvm guest: Add configuration support to enable debug information for KVM Guests
  kvm uapi: Add KICK_CPU and PV_UNHALT definition to uapi
  xen, pvticketlock: Allow interrupts to be enabled while blocking
  x86, ticketlock: Add slowpath logic
  jump_label: Split jumplabel ratelimit
  x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
  x86, pvticketlock: Use callee-save for lock_spinning
  xen, pvticketlocks: Add xen_nopvspin parameter to disable xen pv ticketlocks
  xen, pvticketlock: Xen implementation for PV ticket locks
  xen: Defer spinlock setup until boot CPU setup
  x86, ticketlock: Collapse a layer of functions
  x86, ticketlock: Don't inline _spin_unlock when using paravirt spinlocks
  x86, spinlock: Replace pv spinlocks with pv ticketlocks
parents e1a9c16b 36bd6213
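
The common thread of the series: keep the ticket-lock fast path as a single locked xadd, spin for a bounded number of iterations, and only then drop into a paravirt slow path that halts the vCPU until the unlocker kicks it. Below is a minimal, userspace-style sketch of that protocol; pv_wait()/pv_kick() are hypothetical stand-ins for the lock_spinning/unlock_kick pvops added by this diff, SPIN_THRESHOLD mirrors the new constant, and (unlike the real code) the unlocker here kicks unconditionally rather than testing the slowpath flag.

/* Sketch only: a paravirt-style ticket lock in plain C11 atomics. */
#include <stdatomic.h>
#include <stdint.h>

#define SPIN_THRESHOLD (1 << 15)

struct pv_ticketlock {
	_Atomic uint16_t head;		/* ticket currently being served */
	_Atomic uint16_t tail;		/* next ticket to hand out */
};

/* Hypothetical backend hooks: on KVM these would be "halt this vCPU" and
 * "KVM_HC_KICK_CPU the vCPU waiting for this ticket"; here they are no-ops. */
static void pv_wait(struct pv_ticketlock *lk, uint16_t ticket) { (void)lk; (void)ticket; }
static void pv_kick(struct pv_ticketlock *lk, uint16_t ticket) { (void)lk; (void)ticket; }

static void pv_ticket_lock(struct pv_ticketlock *lk)
{
	uint16_t me = atomic_fetch_add(&lk->tail, 1);	/* grab the next ticket */

	for (;;) {
		unsigned int count = SPIN_THRESHOLD;

		do {
			if (atomic_load(&lk->head) == me)
				return;			/* our turn: lock acquired */
		} while (--count);

		pv_wait(lk, me);	/* spun too long: block until kicked */
	}
}

static void pv_ticket_unlock(struct pv_ticketlock *lk)
{
	uint16_t next = atomic_fetch_add(&lk->head, 1) + 1;

	pv_kick(lk, next);	/* wake whoever holds the next ticket, if anyone */
}

The rest of the diff is mostly about doing this without penalizing the uncontended case: the slowpath flag in bit 0 of the tail lets the unlocker skip the kick entirely, and a static key keeps the native unlock path when no hypervisor backend registers hooks.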
@@ -632,6 +632,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
 	depends on PARAVIRT && SMP
+	select UNINLINE_SPIN_UNLOCK
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly
@@ -656,6 +657,15 @@ config KVM_GUEST
 	  underlying device model, the host provides the guest with
 	  timing infrastructure such as time of day, and system time
 
+config KVM_DEBUG_FS
+	bool "Enable debug information for KVM Guests in debugfs"
+	depends on KVM_GUEST && DEBUG_FS
+	default n
+	---help---
+	  This option enables collection of various statistics for KVM guest.
+	  Statistics are displayed in debugfs filesystem. Enabling this option
+	  may incur significant overhead.
+
 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT_TIME_ACCOUNTING
......
@@ -118,10 +118,20 @@ void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
......
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
 }
 
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended	arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
-	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
-						  unsigned long flags)
-{
-	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
-{
-	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
-}
-
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
-{
-	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
 #endif
......
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
 };
 
 struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct arch_spinlock *lock);
-	int (*spin_is_contended)(struct arch_spinlock *lock);
-	void (*spin_lock)(struct arch_spinlock *lock);
-	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct arch_spinlock *lock);
-	void (*spin_unlock)(struct arch_spinlock *lock);
+	struct paravirt_callee_save lock_spinning;
+	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
 };
 
 /* This contains all the paravirt structures: we get a convenient
......
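
With pv_lock_ops reduced to lock_spinning (callee-save) and unlock_kick, a backend only has to supply those two callbacks and flip the static key. A hedged, kernel-context sketch of the registration pattern follows; my_lock_spinning, my_unlock_kick and my_spinlock_init are placeholder names, and the real KVM version appears later in this diff as kvm_spinlock_init().

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/paravirt.h>

/* Placeholder backend hooks: a real implementation records (lock, ticket),
 * halts the vCPU, and later wakes the vCPU that wants the released ticket. */
static void my_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	/* publish what we are waiting for, then block until kicked */
}
PV_CALLEE_SAVE_REGS_THUNK(my_lock_spinning);

static void my_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	/* find the CPU spinning on (lock, ticket) and wake it */
}

void __init my_spinlock_init(void)
{
	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(my_lock_spinning);
	pv_lock_ops.unlock_kick = my_unlock_kick;
}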
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -34,6 +37,31 @@
 # define UNLOCK_LOCK_PREFIX
 #endif
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD	(1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+							__ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+							__ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
  * the queue, and the other indicating the current tail. The lock is acquired
@@ -47,81 +75,101 @@
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	register struct __raw_tickets inc = { .tail = 1 };
+	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
+	if (likely(inc.head == inc.tail))
+		goto out;
 
+	inc.tail &= ~TICKET_SLOWPATH_FLAG;
 	for (;;) {
-		if (inc.head == inc.tail)
-			break;
-		cpu_relax();
-		inc.head = ACCESS_ONCE(lock->tickets.head);
+		unsigned count = SPIN_THRESHOLD;
+
+		do {
+			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+				goto out;
+			cpu_relax();
+		} while (--count);
+		__ticket_lock_spinning(lock, inc.tail);
 	}
-	barrier();		/* make sure nothing creeps before the lock is taken */
+out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
 	old.tickets = ACCESS_ONCE(lock->tickets);
-	if (old.tickets.head != old.tickets.tail)
+	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
-	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
 	/* cmpxchg is a full barrier, so nothing can move before it */
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+					    arch_spinlock_t old)
 {
-	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
-}
+	arch_spinlock_t new;
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
-{
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
-	return tmp.tail != tmp.head;
-}
+	/* Perform the unlock on the "before" copy */
+	old.tickets.head += TICKET_LOCK_INC;
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	/* Clear the slowpath flag */
+	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
 
-	return (__ticket_t)(tmp.tail - tmp.head) > 1;
+	/*
+	 * If the lock is uncontended, clear the flag - use cmpxchg in
+	 * case it changes behind our back though.
+	 */
+	if (new.tickets.head != new.tickets.tail ||
+	    cmpxchg(&lock->head_tail, old.head_tail,
+					new.head_tail) != old.head_tail) {
+		/*
+		 * Lock still has someone queued for it, so wake up an
+		 * appropriate waiter.
+		 */
+		__ticket_unlock_kick(lock, old.tickets.head);
+	}
 }
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	if (TICKET_SLOWPATH_FLAG &&
+	    static_key_false(&paravirt_ticketlocks_enabled)) {
+		arch_spinlock_t prev;
+
+		prev = *lock;
+		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+
+		/* add_smp() is a full mb() */
+
+		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+			__ticket_unlock_slowpath(lock, prev);
+	} else
+		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
+}
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	return __ticket_spin_is_locked(lock);
-}
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
+	return tmp.tail != tmp.head;
 }
-#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	__ticket_spin_lock(lock);
-}
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
+#define arch_spin_is_contended	arch_spin_is_contended
 
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
@@ -129,8 +177,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 	arch_spin_lock(lock);
 }
 
-#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
-
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (arch_spin_is_locked(lock))
......
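
The subtle part of the new spinlock.h is the unlock/slowpath handshake: a waiter sets bit 0 of the tail (TICKET_SLOWPATH_FLAG) before halting, and the unlocker bumps the head first and only then checks that flag to decide whether __ticket_unlock_kick() is needed. Below is a small, compilable mock of just that handshake, using C11 atomics in place of the kernel's set_bit/add_smp/cmpxchg helpers; the constants mirror the diff, and the lock state in main() is contrived for illustration.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TICKET_LOCK_INC      2	/* tickets advance by 2 under CONFIG_PARAVIRT_SPINLOCKS... */
#define TICKET_SLOWPATH_FLAG 1	/* ...which frees bit 0 of the tail for this flag */

struct mock_lock {
	_Atomic uint8_t head;	/* ticket currently holding the lock */
	_Atomic uint8_t tail;	/* next ticket to hand out, low bit = slowpath flag */
};

/* Waiter side: publish the flag first, then re-check for a late release
 * (mirrors __ticket_enter_slowpath() followed by the pickup test). */
static int enter_slowpath_and_recheck(struct mock_lock *lk, uint8_t want)
{
	atomic_fetch_or(&lk->tail, TICKET_SLOWPATH_FLAG);
	return atomic_load(&lk->head) == want;		/* nonzero: lock became ours */
}

/* Unlocker side: release first, then kick only if a waiter flagged the
 * slow path (mirrors add_smp() followed by the TICKET_SLOWPATH_FLAG test). */
static int unlock_and_need_kick(struct mock_lock *lk)
{
	atomic_fetch_add(&lk->head, TICKET_LOCK_INC);
	return atomic_load(&lk->tail) & TICKET_SLOWPATH_FLAG;
}

int main(void)
{
	/* CPU A holds ticket 0; CPU B is queued with ticket 2 (tail already at 4). */
	struct mock_lock lk = { .head = 0, .tail = 4 };

	if (!enter_slowpath_and_recheck(&lk, 2))
		puts("waiter: lock still held and flag published -> would halt");
	if (unlock_and_need_kick(&lk))
		puts("unlocker: slowpath flag seen -> would call __ticket_unlock_kick()");
	return 0;
}

The real __ticket_unlock_slowpath() additionally clears the flag with a cmpxchg once the queue drains, so an uncontended lock goes back to the pure fast path.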
 #ifndef _ASM_X86_SPINLOCK_TYPES_H
 #define _ASM_X86_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
 #include <linux/types.h>
 
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC	2
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC	1
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
 typedef u8  __ticket_t;
 typedef u16 __ticketpair_t;
 #else
@@ -15,6 +19,8 @@ typedef u16 __ticket_t;
 typedef u32 __ticketpair_t;
 #endif
 
+#define TICKET_LOCK_INC	((__ticket_t)__TICKET_LOCK_INC)
+
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
 typedef struct arch_spinlock {
......
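
A quick sanity check on the changed NR_CPUS bound: when tickets advance by __TICKET_LOCK_INC == 2, an 8-bit ticket only has 256 / 2 = 128 distinct values before it wraps, so u8 tickets are now limited to configs with fewer than 128 CPUs. A standalone restatement of that arithmetic (not kernel code):

#include <assert.h>
#include <stdint.h>

#define TICKET_LOCK_INC 2	/* the CONFIG_PARAVIRT_SPINLOCKS case */

/* With a step of 2, a u8 ticket wraps after 256 / 2 = 128 tickets, so at most
 * 128 CPUs may be queued on one lock before ticket values alias each other. */
static_assert(256 / TICKET_LOCK_INC == 128, "u8 tickets cover 128 CPUs when stepping by 2");
static_assert((uint8_t)(128 * TICKET_LOCK_INC) == 0, "the 129th ticket would alias ticket 0");

int main(void) { return 0; }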
@@ -23,6 +23,7 @@
 #define KVM_FEATURE_ASYNC_PF		4
 #define KVM_FEATURE_STEAL_TIME		5
 #define KVM_FEATURE_PV_EOI		6
+#define KVM_FEATURE_PV_UNHALT		7
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
......
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/kprobes.h>
+#include <linux/debugfs.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -419,6 +420,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	WARN_ON(kvm_register_clock("primary cpu clock"));
 	kvm_guest_cpu_init();
 	native_smp_prepare_boot_cpu();
+	kvm_spinlock_init();
 }
 
 static void kvm_guest_cpu_online(void *dummy)
@@ -523,3 +525,263 @@ static __init int activate_jump_labels(void)
 	return 0;
 }
 arch_initcall(activate_jump_labels);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
int apicid;
unsigned long flags = 0;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
enum kvm_contention_stat {
TAKEN_SLOW,
TAKEN_SLOW_PICKUP,
RELEASED_SLOW,
RELEASED_SLOW_KICKED,
NR_CONTENTION_STATS
};
#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS 30
static struct kvm_spinlock_stats
{
u32 contention_stats[NR_CONTENTION_STATS];
u32 histo_spin_blocked[HISTO_BUCKETS+1];
u64 time_blocked;
} spinlock_stats;
static u8 zero_stats;
static inline void check_zero(void)
{
u8 ret;
u8 old;
old = ACCESS_ONCE(zero_stats);
if (unlikely(old)) {
ret = cmpxchg(&zero_stats, old, 0);
/* This ensures only one fellow resets the stat */
if (ret == old)
memset(&spinlock_stats, 0, sizeof(spinlock_stats));
}
}
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
check_zero();
spinlock_stats.contention_stats[var] += val;
}
static inline u64 spin_time_start(void)
{
return sched_clock();
}
static void __spin_time_accum(u64 delta, u32 *array)
{
unsigned index;
index = ilog2(delta);
check_zero();
if (index < HISTO_BUCKETS)
array[index]++;
else
array[HISTO_BUCKETS]++;
}
static inline void spin_time_accum_blocked(u64 start)
{
u32 delta;
delta = sched_clock() - start;
__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
spinlock_stats.time_blocked += delta;
}
static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;
struct dentry *kvm_init_debugfs(void)
{
d_kvm_debug = debugfs_create_dir("kvm", NULL);
if (!d_kvm_debug)
printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
return d_kvm_debug;
}
static int __init kvm_spinlock_debugfs(void)
{
struct dentry *d_kvm;
d_kvm = kvm_init_debugfs();
if (d_kvm == NULL)
return -ENOMEM;
d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
debugfs_create_u32("taken_slow", 0444, d_spin_debug,
&spinlock_stats.contention_stats[TAKEN_SLOW]);
debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
&spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
debugfs_create_u32("released_slow", 0444, d_spin_debug,
&spinlock_stats.contention_stats[RELEASED_SLOW]);
debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
&spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
debugfs_create_u64("time_blocked", 0444, d_spin_debug,
&spinlock_stats.time_blocked);
debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}
static inline u64 spin_time_start(void)
{
return 0;
}
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif /* CONFIG_KVM_DEBUG_FS */
struct kvm_lock_waiting {
struct arch_spinlock *lock;
__ticket_t want;
};
/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;
/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
struct kvm_lock_waiting *w;
int cpu;
u64 start;
unsigned long flags;
if (in_nmi())
return;
w = &__get_cpu_var(klock_waiting);
cpu = smp_processor_id();
start = spin_time_start();
/*
* Make sure an interrupt handler can't upset things in a
* partially setup state.
*/
local_irq_save(flags);
/*
* The ordering protocol on this is that the "lock" pointer
* may only be set non-NULL if the "want" ticket is correct.
* If we're updating "want", we must first clear "lock".
*/
w->lock = NULL;
smp_wmb();
w->want = want;
smp_wmb();
w->lock = lock;
add_stats(TAKEN_SLOW, 1);
/*
* This uses set_bit, which is atomic but we should not rely on its
* reordering guarantees, so a barrier is needed after this call.
*/
cpumask_set_cpu(cpu, &waiting_cpus);
barrier();
/*
* Mark entry to slowpath before doing the pickup test to make
* sure we don't deadlock with an unlocker.
*/
__ticket_enter_slowpath(lock);
/*
* check again to make sure it didn't become free while
* we weren't looking.
*/
if (ACCESS_ONCE(lock->tickets.head) == want) {
add_stats(TAKEN_SLOW_PICKUP, 1);
goto out;
}
/*
* halt until it's our turn and kicked. Note that we do safe halt
* for irq enabled case to avoid hang when lock info is overwritten
* in irq spinlock slowpath and no spurious interrupt occur to save us.
*/
if (arch_irqs_disabled_flags(flags))
halt();
else
safe_halt();
out:
cpumask_clear_cpu(cpu, &waiting_cpus);
w->lock = NULL;
local_irq_restore(flags);
spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
int cpu;
add_stats(RELEASED_SLOW, 1);
for_each_cpu(cpu, &waiting_cpus) {
const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
if (ACCESS_ONCE(w->lock) == lock &&
ACCESS_ONCE(w->want) == ticket) {
add_stats(RELEASED_SLOW_KICKED, 1);
kvm_kick_cpu(cpu);
break;
}
}
}
/*
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
*/
void __init kvm_spinlock_init(void)
{
if (!kvm_para_available())
return;
/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return;
printk(KERN_INFO "KVM setup paravirtual spinlock\n");
static_key_slow_inc(&paravirt_ticketlocks_enabled);
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
pv_lock_ops.unlock_kick = kvm_unlock_kick;
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
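
With CONFIG_KVM_DEBUG_FS enabled, the counters registered above show up under the kvm/spinlocks directory in debugfs. A small userspace example that reads one of them follows; it assumes debugfs is mounted at the conventional /sys/kernel/debug and that the file name matches the debugfs_create_u32("taken_slow", ...) call in the hunk above.

#include <stdio.h>

int main(void)
{
	/* Path assumes the usual debugfs mount point; the file is created by
	 * debugfs_create_u32("taken_slow", ...) in kvm_spinlock_debugfs(). */
	const char *path = "/sys/kernel/debug/kvm/spinlocks/taken_slow";
	FILE *f = fopen(path, "r");
	unsigned long taken_slow;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &taken_slow) == 1)
		printf("lock acquisitions that entered the slow path: %lu\n", taken_slow);
	fclose(f);
	return 0;
}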
@@ -4,25 +4,17 @@
  */
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/jump_label.h>
 
 #include <asm/paravirt.h>
 
-static inline void
-default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	arch_spin_lock(lock);
-}
-
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
-	.spin_is_locked = __ticket_spin_is_locked,
-	.spin_is_contended = __ticket_spin_is_contended,
-	.spin_lock = __ticket_spin_lock,
-	.spin_lock_flags = default_spin_lock_flags,
-	.spin_trylock = __ticket_spin_trylock,
-	.spin_unlock = __ticket_spin_unlock,
+	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
+	.unlock_kick = paravirt_nop,
 #endif
 };
 EXPORT_SYMBOL(pv_lock_ops);
+
+struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
@@ -279,6 +279,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
 	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
+	xen_init_spinlocks();
 }
 
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
@@ -686,7 +687,6 @@ void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
 	xen_fill_possible_map();
-	xen_init_spinlocks();
 }
 
 static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
......
This diff is collapsed.
@@ -48,7 +48,6 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
-#include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -61,12 +60,6 @@ struct static_key {
 #endif
 };
 
-struct static_key_deferred {
-	struct static_key key;
-	unsigned long timeout;
-	struct delayed_work work;
-};
-
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -78,6 +71,7 @@ enum jump_label_type {
 
 struct module;
 
+#include <linux/atomic.h>
 #ifdef HAVE_JUMP_LABEL
 
 #define JUMP_LABEL_TRUE_BRANCH 1UL
@@ -119,10 +113,7 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void
-jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
 #define STATIC_KEY_INIT_TRUE ((struct static_key) \
 	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
@@ -131,8 +122,6 @@ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
 #else  /* !HAVE_JUMP_LABEL */
 
-#include <linux/atomic.h>
-
 struct static_key {
 	atomic_t enabled;
 };
@@ -141,10 +130,6 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct static_key_deferred {
-	struct static_key key;
-};
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	if (unlikely(atomic_read(&key->enabled)) > 0)
@@ -169,11 +154,6 @@ static inline void static_key_slow_dec(struct static_key *key)
 	atomic_dec(&key->enabled);
 }
 
-static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
-{
-	static_key_slow_dec(&key->key);
-}
-
 static inline int jump_label_text_reserved(void *start, void *end)
 {
 	return 0;
@@ -187,12 +167,6 @@ static inline int jump_label_apply_nops(struct module *mod)
 	return 0;
 }
 
-static inline void
-jump_label_rate_limit(struct static_key_deferred *key,
-		unsigned long rl)
-{
-}
-
 #define STATIC_KEY_INIT_TRUE ((struct static_key) \
 	{ .enabled = ATOMIC_INIT(1) })
 #define STATIC_KEY_INIT_FALSE ((struct static_key) \
......
#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
#define _LINUX_JUMP_LABEL_RATELIMIT_H
#include <linux/jump_label.h>
#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
struct static_key_deferred {
struct static_key key;
unsigned long timeout;
struct delayed_work work;
};
#endif
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
static_key_slow_dec(&key->key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
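
After this split, only the rate-limited users need the workqueue machinery: plain static keys keep including <linux/jump_label.h>, while code that wants deferred decrements includes <linux/jump_label_ratelimit.h> (as the two hunks below do). A hedged sketch of the typical usage follows, with a made-up key name; the calls themselves are the ones declared in jump_label.h and in this new header.

#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/jiffies.h>

/* Hypothetical deferred key; perf's scheduling-events key uses the same calls. */
static struct static_key_deferred my_feature_key;

static void my_feature_setup(void)
{
	/* Batch disables: defer the actual code patching by about one second. */
	jump_label_rate_limit(&my_feature_key, HZ);
}

static void my_feature_get(void)
{
	static_key_slow_inc(&my_feature_key.key);	/* enable the fast path now */
}

static void my_feature_put(void)
{
	/* The decrement is deferred, so rapid get/put cycles don't thrash
	 * the text-patching machinery. */
	static_key_slow_dec_deferred(&my_feature_key);
}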
@@ -48,6 +48,7 @@ struct perf_guest_info_callbacks {
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
 #include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
 #include <linux/atomic.h>
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
......
@@ -19,6 +19,7 @@
 #define KVM_HC_MMU_OP			2
 #define KVM_HC_FEATURES			3
 #define KVM_HC_PPC_MAP_MAGIC_PAGE	4
+#define KVM_HC_KICK_CPU			5
 
 /*
  * hypercalls use architecture specific
......
@@ -13,6 +13,7 @@
 #include <linux/sort.h>
 #include <linux/err.h>
 #include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
 
 #ifdef HAVE_JUMP_LABEL
......