Commit 7a02c8d4 authored by Linus Torvalds

Merge branch 'parisc-5.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull more parisc updates from Helge Deller:

 - Oscar Carter contributed a patch which fixes parisc's usage of
   dereference_function_descriptor() and thus will allow using the
   -Wcast-function-type compiler option in the top-level Makefile

 - Sven Schnelle fixed a bug in the SBA code to prevent crashes during
   kexec

 - John David Anglin provided implementations of the __smp_store_release()
   and __smp_load_acquire() barriers, which avoid using the sync assembler
   instruction and thus speed up barrier paths

 - Some whitespace cleanups in parisc's atomic.h header file

* 'parisc-5.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Implement __smp_store_release and __smp_load_acquire barriers
  parisc: mask out enable and reserved bits from sba imask
  parisc: Whitespace cleanups in atomic.h
  parisc/kernel/ftrace: Remove function callback casts
  sections.h: dereference_function_descriptor() returns void pointer
parents 8cd84b70 e96ebd58
arch/parisc/include/asm/atomic.h
@@ -34,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
@@ -85,7 +85,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
_atomic_spin_lock_irqsave(v, flags); \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
- } \
+ }
#define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
@@ -148,7 +148,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
_atomic_spin_lock_irqsave(v, flags); \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
- } \
+ }
#define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
......
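The _atomic_spin_lock_irqsave() helpers in the hunks above reflect how SMP parisc implements atomic_t: each counter is protected by one of a small array of hashed spinlocks (ATOMIC_HASH) rather than by a hardware read-modify-write instruction. A minimal userspace sketch of that hashed-lock idea, using pthread mutexes and an illustrative hash (the bucket count, shift, and helper names below are not taken from the kernel source):

#include <pthread.h>
#include <stdint.h>

#define ATOMIC_HASH_SIZE 4UL	/* illustrative; the kernel sizes this per config */

/* One lock per bucket; an address picks its bucket from a few address bits. */
static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *atomic_hash_lock(const volatile void *addr)
{
	return &atomic_hash[((uintptr_t)addr >> 4) & (ATOMIC_HASH_SIZE - 1)];
}

/* Analogue of atomic_add(): take the bucket lock around a plain update. */
void hashed_atomic_add(int i, volatile int *counter)
{
	pthread_mutex_t *lock = atomic_hash_lock(counter);

	pthread_mutex_lock(lock);
	*counter += i;
	pthread_mutex_unlock(lock);
}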
arch/parisc/include/asm/barrier.h
@@ -26,6 +26,67 @@
#define __smp_rmb() mb()
#define __smp_wmb() mb()
#define __smp_store_release(p, v) \
do { \
	typeof(p) __p = (p); \
	union { typeof(*p) __val; char __c[1]; } __u = \
		{ .__val = (__force typeof(*p)) (v) }; \
	compiletime_assert_atomic_type(*p); \
	switch (sizeof(*p)) { \
	case 1: \
		asm volatile("stb,ma %0,0(%1)" \
				: : "r"(*(__u8 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 2: \
		asm volatile("sth,ma %0,0(%1)" \
				: : "r"(*(__u16 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 4: \
		asm volatile("stw,ma %0,0(%1)" \
				: : "r"(*(__u32 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	case 8: \
		if (IS_ENABLED(CONFIG_64BIT)) \
			asm volatile("std,ma %0,0(%1)" \
				: : "r"(*(__u64 *)__u.__c), "r"(__p) \
				: "memory"); \
		break; \
	} \
} while (0)

#define __smp_load_acquire(p) \
({ \
	union { typeof(*p) __val; char __c[1]; } __u; \
	typeof(p) __p = (p); \
	compiletime_assert_atomic_type(*p); \
	switch (sizeof(*p)) { \
	case 1: \
		asm volatile("ldb,ma 0(%1),%0" \
				: "=r"(*(__u8 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 2: \
		asm volatile("ldh,ma 0(%1),%0" \
				: "=r"(*(__u16 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 4: \
		asm volatile("ldw,ma 0(%1),%0" \
				: "=r"(*(__u32 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	case 8: \
		if (IS_ENABLED(CONFIG_64BIT)) \
			asm volatile("ldd,ma 0(%1),%0" \
				: "=r"(*(__u64 *)__u.__c) : "r"(__p) \
				: "memory"); \
		break; \
	} \
	__u.__val; \
})
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
......
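As the merge summary notes, these macros let ordered stores and loads stand in for a full sync barrier on release/acquire paths. The pattern they serve is ordinary message passing: a writer fills in data and then publishes a flag with a store-release, and a reader that observes the flag with a load-acquire is guaranteed to also see the data. A small userspace C11 sketch of that pattern (using <stdatomic.h> rather than the kernel's smp_store_release()/smp_load_acquire(), purely for illustration):

#include <stdatomic.h>

static int payload;
static atomic_int ready;

/* Writer: fill in the data, then publish it with a store-release. */
void producer(void)
{
	payload = 42;
	atomic_store_explicit(&ready, 1, memory_order_release);
}

/* Reader: an acquire load of the flag orders the subsequent data read. */
int consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		return payload;		/* guaranteed to observe 42 */
	return -1;			/* flag not published yet */
}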
arch/parisc/kernel/ftrace.c
@@ -64,7 +64,8 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
function_trace_op, regs);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+ if (dereference_function_descriptor(ftrace_graph_return) !=
+     dereference_function_descriptor(ftrace_stub) ||
ftrace_graph_entry != ftrace_graph_entry_stub) {
unsigned long *parent_rp;
......
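The hunk above drops the (trace_func_graph_ret_t) cast: ftrace_graph_return and ftrace_stub have different prototypes, and casting between incompatible function-pointer types is exactly what -Wcast-function-type flags, so the comparison now uses the void * addresses returned by dereference_function_descriptor() instead. On 64-bit parisc a function pointer actually refers to a function descriptor rather than to the code itself, which is why the dereference step exists at all. A toy userspace model of that indirection, with an invented descriptor layout and helper names used only for illustration:

#include <stdio.h>

/* Toy model of a function descriptor: a code address plus the callee's
 * global-pointer value. The real parisc64 layout differs. */
struct fdesc {
	void *addr;	/* entry point of the function's code */
	void *gp;	/* global pointer loaded when calling through it */
};

/* Toy counterpart of dereference_function_descriptor(). */
static void *deref_fdesc(const struct fdesc *fptr)
{
	return fptr->addr;
}

static void stub(void) { }

int main(void)
{
	/* Two descriptors can differ even when they name the same code,
	 * so identity checks should compare the dereferenced addresses. */
	struct fdesc a = { (void *)stub, NULL };
	struct fdesc b = { (void *)stub, (void *)0x1000 };

	printf("descriptor addresses equal: %d\n", &a == &b);	/* 0 */
	printf("code addresses equal:       %d\n",
	       deref_fdesc(&a) == deref_fdesc(&b));		/* 1 */
	return 0;
}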
drivers/parisc/sba_iommu.c
@@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
+ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
......
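Masking with ~0x1fffffULL clears the low 21 bits of the value read back from IOC_IBASE, so an enable bit or reserved bits left set by a previous kernel (the kexec case mentioned in the merge summary) cannot leak into the stored I/O virtual base. A tiny sketch of the effect, using a made-up register value:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* Hypothetical IOC_IBASE readback: base 0xf4000000 with a stale
	 * enable bit (bit 0) still set by the previous kernel. */
	uint64_t raw = 0xf4000001ULL;

	uint64_t old_ibase = raw;			/* before the patch */
	uint64_t new_ibase = raw & ~0x1fffffULL;	/* after the patch  */

	printf("old ibase: 0x%" PRIx64 "\n", old_ibase);	/* 0xf4000001 */
	printf("new ibase: 0x%" PRIx64 "\n", new_ibase);	/* 0xf4000000 */
	return 0;
}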
include/asm-generic/sections.h
@@ -60,8 +60,8 @@ extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
#ifndef dereference_function_descriptor
- #define dereference_function_descriptor(p) (p)
- #define dereference_kernel_function_descriptor(p) (p)
+ #define dereference_function_descriptor(p) ((void *)(p))
+ #define dereference_kernel_function_descriptor(p) ((void *)(p))
#endif
/* random extra sections (if any). Override
......
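With the generic fallback returning its argument unchanged, callers such as the ftrace hunk above could only compare callbacks of different prototypes by casting one function-pointer type to another, which -Wcast-function-type warns about; returning void * turns it into a plain pointer comparison. A minimal sketch of the difference, with callback types and helper names invented for illustration:

/* A callback type with a prototype different from stub(), as in ftrace. */
typedef void (*ret_handler_t)(unsigned long);

static void stub(void) { }
static ret_handler_t current_handler;

/* Old style: needs a cast between incompatible function-pointer types,
 * which -Wcast-function-type flags. */
static int handler_is_stub_cast(void)
{
	return current_handler == (ret_handler_t)stub;
}

/* New style: compare plain void * addresses, mirroring the fallback
 * dereference_function_descriptor() now returning void *. */
#define dereference_function_descriptor(p) ((void *)(p))

static int handler_is_stub_deref(void)
{
	return dereference_function_descriptor(current_handler) ==
	       dereference_function_descriptor(stub);
}

int main(void)
{
	return handler_is_stub_cast() + handler_is_stub_deref();
}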