Commit 7a7546b3 authored by David Vrabel, committed by Konrad Rzeszutek Wilk

x86: xen: size struct xen_spinlock to always fit in arch_spinlock_t

If NR_CPUS < 256 then arch_spinlock_t is only 16 bits wide but struct
xen_spinlock is 32 bits.  When a spin lock is contended and
xl->spinners is modified, the two bytes immediately after the spin
lock would be corrupted.
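
For illustration only (this sketch is not part of the commit, and the
types are simplified stand-ins, not the kernel definitions), writing
the 16-bit spinners field through the wider pre-fix layout clobbers
whatever sits in the two bytes after a 2-byte lock slot:

	/* Simplified stand-in for the 2-byte arch_spinlock_t used when
	 * NR_CPUS < 256; not the real kernel definition. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	typedef struct { uint16_t head_tail; } arch_spinlock_t;

	/* Pre-fix layout: 1-byte lock + padding + 2-byte spinners = 4 bytes. */
	struct xen_spinlock_old {
		unsigned char  lock;
		unsigned short spinners;
	};

	struct demo {
		arch_spinlock_t lock;		/* only 2 bytes reserved here */
		uint16_t        neighbour;	/* bytes immediately after the lock */
	};

	int main(void)
	{
		struct demo d;

		memset(&d, 0, sizeof(d));
		d.neighbour = 0x1234;

		/* Viewing the lock through the wider struct, as the pre-fix
		 * paravirt code effectively did, lands the spinners update
		 * on d.neighbour. */
		((struct xen_spinlock_old *)&d.lock)->spinners++;

		printf("neighbour: 0x%04x\n", d.neighbour);	/* no longer 0x1234 */
		return 0;
	}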

This is a regression caused by 84eb950d
(x86, ticketlock: Clean up types and accessors), which reduced the
size of arch_spinlock_t.

Fix this by making xl->spinners a u8 if NR_CPUS < 256.  A
BUILD_BUG_ON() is also added to check that the sizes of the two
structures are compatible.
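
A sketch of how such a compile-time check catches a size mismatch
(assuming the classic negative-array-size trick; the kernel's real
BUILD_BUG_ON() is more elaborate, and the stand-in types below are not
the kernel's):

	#include <stdint.h>

	typedef struct { uint16_t head_tail; } arch_spinlock_t;	/* stand-in */
	struct xen_spinlock { uint8_t lock; uint8_t spinners; };	/* post-fix layout */

	/* Produces a negative array size, i.e. a compile error, when cond
	 * is true; a no-op otherwise. */
	#define SIZE_CHECK(cond)  ((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline void init_sketch(void)
	{
		/* Refuses to build if struct xen_spinlock ever outgrows the slot. */
		SIZE_CHECK(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
	}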

In many cases this was not noticeable, as there would often be padding
bytes after the lock (e.g., if any of CONFIG_GENERIC_LOCKBREAK,
CONFIG_DEBUG_SPINLOCK, or CONFIG_DEBUG_LOCK_ALLOC were enabled).

The bnx2 driver is affected. In struct bnx2, phy_lock and
indirect_lock may have no padding after them.  Contention on phy_lock
would corrupt indirect_lock, making it appear locked, and the driver
would deadlock.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
CC: stable@kernel.org #only 3.2
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 8ea11f7f
@@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+/*
+ * Size struct xen_spinlock so it's the same as arch_spinlock_t.
+ */
+#if NR_CPUS < 256
+typedef u8 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
+#else
+typedef u16 xen_spinners_t;
+# define inc_spinners(xl) \
+	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
+#endif
+
 struct xen_spinlock {
 	unsigned char lock;	/* 0 -> free; 1 -> locked */
-	unsigned short spinners;	/* count of waiting cpus */
+	xen_spinners_t spinners;	/* count of waiting cpus */
 };
 
 static int xen_spin_is_locked(struct arch_spinlock *lock)
@@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 
 	wmb();		/* set lock of interest before count */
 
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	inc_spinners(xl);
 
 	return prev;
 }
@@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
  */
 static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
+	dec_spinners(xl);
 	wmb();		/* decrement count before restoring lock */
 	__this_cpu_write(lock_spinners, prev);
 }
@@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 void __init xen_init_spinlocks(void)
 {
+	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
 	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
 	pv_lock_ops.spin_lock = xen_spin_lock;
...