Commit a9ac985e authored by John David Anglin, committed by Kleber Sacilotto de Souza

parisc: Remove unnecessary barriers from spinlock.h

BugLink: https://bugs.launchpad.net/bugs/1792377

commit 3b885ac1 upstream.

Now that mb() is an instruction barrier, issuing unnecessary barriers will slow
performance.
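For reference, on parisc mb() now expands, roughly, to the SYNC instruction plus a
compiler barrier. A paraphrased sketch, not the verbatim kernel source (the real
definition lives in arch/parisc/include/asm/barrier.h):

	/* Sketch: SYNC orders memory references in hardware, and the
	 * "memory" clobber keeps the compiler from reordering accesses
	 * across the barrier. */
	#define mb()	__asm__ __volatile__ ("sync" : : : "memory")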

The spinlock defines have a number of unnecessary barriers.  The __ldcw()
define is both a hardware and compiler barrier.  The mb() barriers in the
routines using __ldcw() serve no purpose.
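A paraphrased sketch of the __ldcw() define illustrates why (the real macro, in
arch/parisc/include/asm/ldcw.h, also selects the ldcw,co variant on PA 2.0 CPUs):

	/* Sketch: ldcw atomically loads the word and clears it. The
	 * volatile asm with a "memory" clobber is already a compiler
	 * barrier, and the instruction itself is ordered by the
	 * hardware, so surrounding it with mb() adds nothing. */
	#define __ldcw(a) ({					\
		unsigned __ret;					\
		__asm__ __volatile__ ("ldcw 0(%1),%0"		\
			: "=r" (__ret) : "r" (a) : "memory");	\
		__ret;						\
	})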

The only barrier needed is the one in arch_spin_unlock().  We need to ensure
all accesses are complete prior to releasing the lock.
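Concretely, after this patch the unlock path (shown in the diff below) keeps a
single barrier, placed ahead of the releasing store:

	static inline void arch_spin_unlock(arch_spinlock_t *x)
	{
		volatile unsigned int *a;

		a = __ldcw_align(x);
		mb();	/* complete all prior accesses... */
		*a = 1;	/* ...before the store that releases the lock */
	}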
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent d6f89fe7
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -21,7 +21,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -31,16 +30,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }