Commit 2c9046b7 authored by Palmer Dabbelt

RISC-V: Assorted memory model fixes

These fixes fall into three categories:

* The definition of __smp_{store_release,load_acquire}, which allow us to
  avoid emitting a full fence where a lighter one suffices (see the sketch
  after this message).
* Fixes to avoid relying on the behavior of "*.aqrl" atomics, as those
  are specified in the currently released RISC-V memory model draft in
  a way that makes them useless for Linux.  This might change in the
  future, but now the code matches the memory model spec as it's written
  so at least we're getting closer to something sane.  The actual fix is
  to delete the RISC-V specific atomics and drop back to generic
  versions that use the new fences from above.
* Cleanups to our atomic macros, which are mostly non-functional
  changes.

Unfortunately I haven't given these as thorough a testing as I probably
should have, but I've poked through the code and they seem generally OK.
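For context, the asm-generic fallback that kicks in when an architecture does
not define these macros builds them from a full barrier, roughly as in the
sketch below (a paraphrase of include/asm-generic/barrier.h; on RISC-V
__smp_mb() is a full "fence rw,rw"), which is the unnecessary full fence the
first bullet refers to:

/* Rough sketch of the asm-generic fallback, for comparison only. */
#ifndef __smp_store_release
#define __smp_store_release(p, v)				\
do {								\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();	/* full fence before the store */	\
	WRITE_ONCE(*p, v);					\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	__smp_mb();	/* full fence after the load */		\
	___p1;							\
})
#endif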
parents 1cead2d7 5ce6c1f3
arch/riscv/include/asm/barrier.h
@@ -38,6 +38,21 @@
 #define __smp_rmb()	RISCV_FENCE(r,r)
 #define __smp_wmb()	RISCV_FENCE(w,w)
 
+#define __smp_store_release(p, v)			\
+do {							\
+	compiletime_assert_atomic_type(*p);		\
+	RISCV_FENCE(rw,w);				\
+	WRITE_ONCE(*p, v);				\
+} while (0)
+
+#define __smp_load_acquire(p)				\
+({							\
+	typeof(*p) ___p1 = READ_ONCE(*p);		\
+	compiletime_assert_atomic_type(*p);		\
+	RISCV_FENCE(r,rw);				\
+	___p1;						\
+})
+
 /*
  * This is a very specific barrier: it's currently only used in two places in
  * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
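As a usage sketch (hypothetical code, not from this commit), the classic
flag/data handoff is what these definitions map to: the producer's release
pairs a "fence rw,w" with its store and the consumer's acquire pairs its load
with a "fence r,rw", rather than each side issuing a full "fence rw,rw".

/* Hypothetical producer/consumer handoff, for illustration only. */
static int handoff_data;
static int handoff_ready;

static void producer(void)
{
	handoff_data = 42;			/* plain store */
	smp_store_release(&handoff_ready, 1);	/* fence rw,w; then store */
}

static int consumer(void)
{
	if (smp_load_acquire(&handoff_ready))	/* load; then fence r,rw */
		return handoff_data;		/* guaranteed to observe 42 */
	return -1;
}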
arch/riscv/include/asm/fence.h (new file)
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#ifdef CONFIG_SMP
+#define RISCV_ACQUIRE_BARRIER		"\tfence r , rw\n"
+#define RISCV_RELEASE_BARRIER		"\tfence rw, w\n"
+#else
+#define RISCV_ACQUIRE_BARRIER
+#define RISCV_RELEASE_BARRIER
+#endif
+
+#endif	/* _ASM_RISCV_FENCE_H */
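Since these are asm string fragments, the preprocessor pastes them directly
into asm() templates, as the spinlock changes below do. A rough illustration
of the resulting text:

/*
 * Illustration only: with CONFIG_SMP, string concatenation such as
 *
 *	"	amoswap.w %0, %2, %1\n"
 *	RISCV_ACQUIRE_BARRIER
 *
 * produces the assembler text
 *
 *	amoswap.w %0, %2, %1
 *	fence r , rw
 *
 * whereas on !CONFIG_SMP the macro is empty and only the AMO remains.
 */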
arch/riscv/include/asm/spinlock.h
@@ -17,6 +17,7 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/fence.h>
 
 /*
  * Simple spin lock operations. These provide no fairness guarantees.
@@ -28,10 +29,7 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	int tmp = 1, busy;
 
 	__asm__ __volatile__ (
-		"amoswap.w.aq %0, %2, %1"
+		"	amoswap.w %0, %2, %1\n"
+		RISCV_ACQUIRE_BARRIER
 		: "=r" (busy), "+A" (lock->lock)
 		: "r" (tmp)
 		: "memory");
@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
 		"1: lr.w %1, %0\n"
 		" bltz %1, 1b\n"
 		" addi %1, %1, 1\n"
-		" sc.w.aq %1, %1, %0\n"
+		" sc.w %1, %1, %0\n"
 		" bnez %1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		: "+A" (lock->lock), "=&r" (tmp)
 		:: "memory");
 }
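A rough sketch (CONFIG_SMP assumed, register names illustrative) of the
sequence arch_read_lock() now emits, showing that the acquire fence sits once
after the LR/SC retry loop rather than being a .aq annotation on the sc.w:

/*
 * Illustration only:
 *
 *	1:	lr.w	t0, (a0)
 *		bltz	t0, 1b
 *		addi	t0, t0, 1
 *		sc.w	t0, t0, (a0)
 *		bnez	t0, 1b
 *		fence	r , rw
 */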
@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
 		"1: lr.w %1, %0\n"
 		" bnez %1, 1b\n"
 		" li %1, -1\n"
-		" sc.w.aq %1, %1, %0\n"
+		" sc.w %1, %1, %0\n"
 		" bnez %1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		: "+A" (lock->lock), "=&r" (tmp)
 		:: "memory");
 }
@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 		"1: lr.w %1, %0\n"
 		" bltz %1, 1f\n"
 		" addi %1, %1, 1\n"
-		" sc.w.aq %1, %1, %0\n"
+		" sc.w %1, %1, %0\n"
 		" bnez %1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		"1:\n"
 		: "+A" (lock->lock), "=&r" (busy)
 		:: "memory");
@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 		"1: lr.w %1, %0\n"
 		" bnez %1, 1f\n"
 		" li %1, -1\n"
-		" sc.w.aq %1, %1, %0\n"
+		" sc.w %1, %1, %0\n"
 		" bnez %1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		"1:\n"
 		: "+A" (lock->lock), "=&r" (busy)
 		:: "memory");
@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 static inline void arch_read_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
-		"amoadd.w.rl x0, %1, %0"
+		RISCV_RELEASE_BARRIER
+		" amoadd.w x0, %1, %0\n"
 		: "+A" (lock->lock)
 		: "r" (-1)
 		: "memory");
@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
 
 static inline void arch_write_unlock(arch_rwlock_t *lock)
 {
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
 }
 
 #endif /* _ASM_RISCV_SPINLOCK_H */
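On the release side, a rough sketch (CONFIG_SMP assumed, register names
illustrative) of what smp_store_release(&lock->lock, 0) expands to with the
__smp_store_release() definition added above, replacing the old single
"amoswap.w.rl x0, x0, %0" unlock:

/*
 * Illustration only:
 *
 *	fence	rw, w
 *	sw	zero, 0(a0)
 *
 * i.e. a release fence followed by an ordinary store of zero.
 */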