From 8184c812b4a9de8dd98d7fd85c6a35df82c52624 Mon Sep 17 00:00:00 2001
From: Richard Henderson <rth@kanga.twiddle.net>
Date: Mon, 21 Jul 2003 09:46:09 -0700
Subject: [PATCH] [ALPHA] Add atomic64_t.

---
 include/asm-alpha/atomic.h | 85 +++++++++++++++++++++++++++++++++++++-
 include/asm-alpha/local.h  | 40 ++++++++++++++++++
 2 files changed, 123 insertions(+), 2 deletions(-)
 create mode 100644 include/asm-alpha/local.h

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 349cd5613f56..cd253c8c6c7b 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -16,11 +16,16 @@
  * the user gave us, not some alias that contains the same information.
  */
 typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic64_t;
 
-#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
+#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
+
+#define atomic_read(v)		((v)->counter + 0)
+#define atomic64_read(v)	((v)->counter + 0)
 
-#define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
+#define atomic64_set(v,i)	((v)->counter = (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -43,6 +48,21 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	:"Ir" (i), "m" (v->counter));
 }
 
+static __inline__ void atomic64_add(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	unsigned long temp;
@@ -58,6 +78,22 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	:"Ir" (i), "m" (v->counter));
 }
 
+static __inline__ void atomic64_sub(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+
 /*
  * Same as above, but return the result value
  */
@@ -79,6 +115,24 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 	return result;
 }
 
+static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%3,%2\n"
+	"	addq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
@@ -97,14 +151,41 @@ static __inline__ long atomic_sub_return(int i, atomic_t * v)
 	return result;
 }
 
+static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%3,%2\n"
+	"	subq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
+
 #define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic64_inc_return(v) atomic64_add_return(1,(v))
 
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
 
 #define atomic_inc(v) atomic_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1,(v))
+
 #define atomic_dec(v) atomic_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1,(v))
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h
new file mode 100644
index 000000000000..49f543a8f11e
--- /dev/null
+++ b/include/asm-alpha/local.h
@@ -0,0 +1,40 @@
+#ifndef _ALPHA_LOCAL_H
+#define _ALPHA_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+typedef atomic64_t local_t;
+
+#define LOCAL_INIT(i)	ATOMIC64_INIT(i)
+#define local_read(v)	atomic64_read(v)
+#define local_set(v,i)	atomic64_set(v,i)
+
+#define local_inc(v) atomic64_inc(v)
+#define local_dec(v) atomic64_dec(v)
+#define local_add(i, v) atomic64_add(i, v)
+#define local_sub(i, v) atomic64_sub(i, v)
+
+#define __local_inc(v)		((v)->counter++)
+#define __local_dec(v)		((v)->counter--)
+#define __local_add(i,v)	((v)->counter+=(i))
+#define __local_sub(i,v)	((v)->counter-=(i))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations.  Note they take
+ * a variable, not an address.
+ */
+#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
+
+#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
+#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
+#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
+#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
+
+#endif /* _ALPHA_LOCAL_H */
-- 
2.30.9
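
Usage note (illustrative, not part of the patch): the new atomic64_t operations mirror the existing atomic_t interface on a long-sized counter. Each is built on an ldq_l/stq_c (load-locked/store-conditional) retry loop, and the *_return variants additionally issue an mb so the updated value is ordered against surrounding memory accesses. A minimal sketch of how a caller might use the interface follows; the names sample_refcnt and sample_release are hypothetical, chosen only for the example:

	#include <asm/atomic.h>

	/* Hypothetical 64-bit reference count, starting at one. */
	static atomic64_t sample_refcnt = ATOMIC64_INIT(1);

	static void sample_get(void)
	{
		atomic64_inc(&sample_refcnt);	/* lock-free ldq_l/stq_c loop */
	}

	static void sample_put(void)
	{
		/* atomic64_dec_and_test() is nonzero once the count hits zero */
		if (atomic64_dec_and_test(&sample_refcnt))
			sample_release();	/* hypothetical cleanup hook */
	}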