Commit 5d869168 authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6

into kernel.bkbits.net:/home/davem/sparc-2.6
parents d1c0dfc8 6afb3c32
@@ -52,6 +52,66 @@
 /* Used to protect the IRQ action lists */
 spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
+#ifdef CONFIG_SMP
+#define SMP_NOP2 "nop; nop;\n\t"
+#define SMP_NOP3 "nop; nop; nop;\n\t"
+#else
+#define SMP_NOP2
+#define SMP_NOP3
+#endif /* SMP */
+
+unsigned long __local_irq_save(void)
+{
+        unsigned long retval;
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                "rd     %%psr, %0\n\t"
+                SMP_NOP3        /* Sun4m + Cypress + SMP bug */
+                "or     %0, %2, %1\n\t"
+                "wr     %1, 0, %%psr\n\t"
+                "nop; nop; nop\n"
+                : "=&r" (retval), "=r" (tmp)
+                : "i" (PSR_PIL)
+                : "memory");
+
+        return retval;
+}
+
+void local_irq_enable(void)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                "rd     %%psr, %0\n\t"
+                SMP_NOP3        /* Sun4m + Cypress + SMP bug */
+                "andn   %0, %1, %0\n\t"
+                "wr     %0, 0, %%psr\n\t"
+                "nop; nop; nop\n"
+                : "=&r" (tmp)
+                : "i" (PSR_PIL)
+                : "memory");
+}
+
+void local_irq_restore(unsigned long old_psr)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__(
+                "rd     %%psr, %0\n\t"
+                "and    %2, %1, %2\n\t"
+                SMP_NOP2        /* Sun4m + Cypress + SMP bug */
+                "andn   %0, %1, %0\n\t"
+                "wr     %0, %2, %%psr\n\t"
+                "nop; nop; nop\n"
+                : "=&r" (tmp)
+                : "i" (PSR_PIL), "r" (old_psr)
+                : "memory");
+}
+
+EXPORT_SYMBOL(__local_irq_save);
+EXPORT_SYMBOL(local_irq_enable);
+EXPORT_SYMBOL(local_irq_restore);
 /*
  * Dave Redman (djhr@tadpole.co.uk)
  *
......
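The functions added in the hunk above move sparc32's IRQ-flag manipulation out of line. A minimal usage sketch of the resulting interface follows; the function critical_section() and the variable shared_counter are hypothetical, only the local_irq_save()/local_irq_restore() calls (which asm-sparc/system.h later in this diff maps onto __local_irq_save() and local_irq_restore()) come from the patch.

    /* Hypothetical caller, sketching how the out-of-line helpers are used. */
    static int shared_counter;              /* assumed interrupt-shared data */

    static void critical_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* old PSR returned by __local_irq_save() */
            shared_counter++;               /* interrupts masked on this CPU here */
            local_irq_restore(flags);       /* put the saved PSR_PIL bits back */
    }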
@@ -148,11 +148,12 @@ extern char reboot_command [];
 extern void (*prom_palette)(int);
 
+/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
-        sti();
+        local_irq_enable();
         mdelay(8);
-        cli();
+        local_irq_disable();
         if (!serial_console && prom_palette)
                 prom_palette (1);
         prom_halt();
@@ -165,9 +166,9 @@ void machine_restart(char * cmd)
 {
         char *p;
 
-        sti();
+        local_irq_enable();
         mdelay(8);
-        cli();
+        local_irq_disable();
         p = strchr (reboot_command, '\n');
         if (p) *p = 0;
......
@@ -61,7 +61,7 @@ void __down(struct semaphore * sem)
                  * Add "everybody else" into it. They aren't
                  * playing, because we own the spinlock.
                  */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+                if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
                         sem->sleepers = 0;
                         break;
                 }
@@ -101,7 +101,7 @@ int __down_interruptible(struct semaphore * sem)
                 if (signal_pending(current)) {
                         retval = -EINTR;
                         sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
+                        atomic24_add(sleepers, &sem->count);
                         break;
                 }
@@ -111,7 +111,7 @@ int __down_interruptible(struct semaphore * sem)
                  * "-1" is because we're still hoping to get
                  * the lock.
                  */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+                if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
                         sem->sleepers = 0;
                         break;
                 }
@@ -146,7 +146,7 @@ int __down_trylock(struct semaphore * sem)
          * Add "everybody else" and us into it. They aren't
          * playing, because we own the spinlock.
          */
-        if (!atomic_add_negative(sleepers, &sem->count))
+        if (!atomic24_add_negative(sleepers, &sem->count))
                 wake_up(&sem->wait);
         spin_unlock_irqrestore(&semaphore_lock, flags);
......
@@ -56,6 +56,9 @@ int smp_activated = 0;
 volatile int __cpu_number_map[NR_CPUS];
 volatile int __cpu_logical_map[NR_CPUS];
 cycles_t cacheflush_time = 0; /* XXX */
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+        [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+};
 
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
......
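The __atomic_hash array defined above backs the new spinlock-based atomic_t implementation declared in asm-sparc/atomic.h further down in this diff: the address of each atomic variable is hashed to select one of ATOMIC_HASH_SIZE locks, so unrelated counters rarely contend on the same lock. A minimal user-space sketch of that idea, with hypothetical names (counter_add, hash_lock, NLOCKS), is shown below; it is an illustration of the technique, not kernel code.

    #include <pthread.h>

    #define NLOCKS 4        /* analogous to ATOMIC_HASH_SIZE */

    static pthread_mutex_t lock_hash[NLOCKS] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Hash the variable's address to pick a lock, like ATOMIC_HASH(a). */
    static pthread_mutex_t *hash_lock(void *addr)
    {
            return &lock_hash[((unsigned long)addr >> 8) & (NLOCKS - 1)];
    }

    /* Hypothetical counter_add(): the analogue of __atomic_add_return(). */
    static int counter_add(int *counter, int i)
    {
            pthread_mutex_t *l = hash_lock(counter);
            int ret;

            pthread_mutex_lock(l);
            ret = (*counter += i);
            pthread_mutex_unlock(l);
            return ret;
    }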
@@ -86,8 +86,8 @@ extern int __divdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
 
 /* Private functions with odd calling conventions. */
-extern void ___atomic_add(void);
-extern void ___atomic_sub(void);
+extern void ___atomic24_add(void);
+extern void ___atomic24_sub(void);
 extern void ___set_bit(void);
 extern void ___clear_bit(void);
 extern void ___change_bit(void);
@@ -147,8 +147,8 @@ EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
 
 /* Atomic operations. */
-EXPORT_SYMBOL(___atomic_add);
-EXPORT_SYMBOL(___atomic_sub);
+EXPORT_SYMBOL(___atomic24_add);
+EXPORT_SYMBOL(___atomic24_sub);
 
 /* Bit operations. */
 EXPORT_SYMBOL(___set_bit);
@@ -159,10 +159,6 @@ EXPORT_SYMBOL(___change_bit);
 /* IRQ implementation. */
 EXPORT_SYMBOL(global_irq_holder);
 EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
 
 /* Misc SMP information */
 EXPORT_SYMBOL(__cpu_number_map);
......
@@ -45,8 +45,8 @@ ___xchg32_sun4md:
 /* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
  * Really, some things here for SMP are overly clever, go read the header.
  */
-        .globl  ___atomic_add
-___atomic_add:
+        .globl  ___atomic24_add
+___atomic24_add:
         rd      %psr, %g3               ! Keep the code small, old way was stupid
         nop; nop; nop;                  ! Let the bits set
         or      %g3, PSR_PIL, %g7       ! Disable interrupts
@@ -56,13 +56,13 @@ ___atomic_add:
 1:      ldstub  [%g1 + 3], %g7          ! Spin on the byte lock for SMP.
         orcc    %g7, 0x0, %g0           ! Did we get it?
         bne     1b                      ! Nope...
-        ld      [%g1], %g7              ! Load locked atomic_t
+        ld      [%g1], %g7              ! Load locked atomic24_t
         sra     %g7, 8, %g7             ! Get signed 24-bit integer
         add     %g7, %g2, %g2           ! Add in argument
-        sll     %g2, 8, %g7             ! Transpose back to atomic_t
+        sll     %g2, 8, %g7             ! Transpose back to atomic24_t
         st      %g7, [%g1]              ! Clever: This releases the lock as well.
 #else
-        ld      [%g1], %g7              ! Load locked atomic_t
+        ld      [%g1], %g7              ! Load locked atomic24_t
         add     %g7, %g2, %g2           ! Add in argument
         st      %g2, [%g1]              ! Store it back
 #endif
@@ -71,8 +71,8 @@ ___atomic_add:
         jmpl    %o7, %g0                ! NOTE: not + 8, see callers in atomic.h
         mov     %g4, %o7                ! Restore %o7
 
-        .globl  ___atomic_sub
-___atomic_sub:
+        .globl  ___atomic24_sub
+___atomic24_sub:
         rd      %psr, %g3               ! Keep the code small, old way was stupid
         nop; nop; nop;                  ! Let the bits set
         or      %g3, PSR_PIL, %g7       ! Disable interrupts
@@ -82,13 +82,13 @@ ___atomic_sub:
 1:      ldstub  [%g1 + 3], %g7          ! Spin on the byte lock for SMP.
         orcc    %g7, 0x0, %g0           ! Did we get it?
         bne     1b                      ! Nope...
-        ld      [%g1], %g7              ! Load locked atomic_t
+        ld      [%g1], %g7              ! Load locked atomic24_t
         sra     %g7, 8, %g7             ! Get signed 24-bit integer
         sub     %g7, %g2, %g2           ! Subtract argument
-        sll     %g2, 8, %g7             ! Transpose back to atomic_t
+        sll     %g2, 8, %g7             ! Transpose back to atomic24_t
         st      %g7, [%g1]              ! Clever: This releases the lock as well
 #else
-        ld      [%g1], %g7              ! Load locked atomic_t
+        ld      [%g1], %g7              ! Load locked atomic24_t
         sub     %g7, %g2, %g2           ! Subtract argument
         st      %g2, [%g1]              ! Store it back
 #endif
......
@@ -9,7 +9,7 @@
 /* An unsigned long type for operations which are atomic for a single
  * CPU. Usually used in combination with per-cpu variables. */
 
-#if BITS_PER_LONG == 32 && !defined(CONFIG_SPARC32)
+#if BITS_PER_LONG == 32
 /* Implement in terms of atomics. */
 
 /* Don't use typedef: don't want them to be mixed with atomic_t's. */
......
@@ -27,8 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)          ((v)->counter)
@@ -37,8 +36,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)         (((v)->counter) = (i))
@@ -47,8 +45,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -63,8 +60,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -81,8 +77,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -99,8 +94,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -114,8 +108,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -131,8 +124,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -151,8 +143,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -172,8 +163,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
......
@@ -29,8 +29,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)          ((v)->counter)
@@ -46,8 +45,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)         ((v)->counter = (i))
@@ -68,8 +66,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -85,8 +82,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -137,8 +133,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -158,8 +153,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -390,8 +384,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
@@ -412,8 +405,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -433,8 +425,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
@@ -452,8 +443,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 #define atomic_inc(v) atomic_add(1,(v))
@@ -469,8 +459,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_dec - decrement and test
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 #define atomic_dec(v) atomic_sub(1,(v))
@@ -489,8 +478,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
......
@@ -2,21 +2,82 @@
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ *
+ * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
+ * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
  */
 
 #ifndef __ARCH_SPARC_ATOMIC__
 #define __ARCH_SPARC_ATOMIC__
 
 #include <linux/config.h>
+#include <linux/spinlock.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
-#ifndef CONFIG_SMP
+
+#ifdef CONFIG_SMP
+#define ATOMIC_HASH_SIZE        4
+#define ATOMIC_HASH(a)  (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
+#else /* SMP */
+#define ATOMIC_HASH_SIZE        1
+#define ATOMIC_HASH(a)  0
+#endif /* SMP */
+
+static inline int __atomic_add_return(int i, atomic_t *v)
+{
+        int ret;
+        unsigned long flags;
+
+        spin_lock_irqsave(ATOMIC_HASH(v), flags);
+        ret = (v->counter += i);
+        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+
+        return ret;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(ATOMIC_HASH(v), flags);
+        v->counter = i;
+        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
 
 #define ATOMIC_INIT(i)  { (i) }
 #define atomic_read(v)          ((v)->counter)
-#define atomic_set(v, i)        (((v)->counter) = i)
+
+#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)           ((void)__atomic_add_return(  1, (v)))
+#define atomic_dec(v)           ((void)__atomic_add_return( -1, (v)))
+
+#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
+#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)    (__atomic_add_return(  1, (v)))
+#define atomic_dec_return(v)    (__atomic_add_return( -1, (v)))
+
+#define atomic_dec_and_test(v)  (atomic_dec_return(v) == 0)
+
+/* This is the old 24-bit implementation. It's still used internally
+ * by some sparc-specific code, notably the semaphore implementation.
+ */
+typedef struct { volatile int counter; } atomic24_t;
+
+#ifndef CONFIG_SMP
+
+#define ATOMIC24_INIT(i)  { (i) }
+#define atomic24_read(v)          ((v)->counter)
+#define atomic24_set(v, i)        (((v)->counter) = i)
 
 #else
 /* We do the bulk of the actual work out of line in two common
@@ -33,9 +94,9 @@ typedef struct { volatile int counter; } atomic_t;
  * 31                          8 7      0
  */
 
-#define ATOMIC_INIT(i)  { ((i) << 8) }
+#define ATOMIC24_INIT(i)        { ((i) << 8) }
 
-static __inline__ int atomic_read(const atomic_t *v)
+static inline int atomic24_read(const atomic24_t *v)
 {
         int ret = v->counter;
 
@@ -45,10 +106,10 @@ static __inline__ int atomic_read(const atomic_t *v)
         return ret >> 8;
 }
 
-#define atomic_set(v, i)        (((v)->counter) = ((i) << 8))
+#define atomic24_set(v, i)      (((v)->counter) = ((i) << 8))
 #endif
 
-static inline int __atomic_add(int i, atomic_t *v)
+static inline int __atomic24_add(int i, atomic24_t *v)
 {
         register volatile int *ptr asm("g1");
         register int increment asm("g2");
@@ -61,7 +122,7 @@ static inline int __atomic_add(int i, atomic_t *v)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_add\n\t"
+       "call    ___atomic24_add\n\t"
         " add   %%o7, 8, %%o7\n"
         : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
         : "0" (increment), "r" (ptr)
@@ -70,7 +131,7 @@ static inline int __atomic_add(int i, atomic_t *v)
         return increment;
 }
 
-static inline int __atomic_sub(int i, atomic_t *v)
+static inline int __atomic24_sub(int i, atomic24_t *v)
 {
         register volatile int *ptr asm("g1");
         register int increment asm("g2");
@@ -83,7 +144,7 @@ static inline int __atomic_sub(int i, atomic_t *v)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_sub\n\t"
+       "call    ___atomic24_sub\n\t"
         " add   %%o7, 8, %%o7\n"
         : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
         : "0" (increment), "r" (ptr)
@@ -92,19 +153,19 @@ static inline int __atomic_sub(int i, atomic_t *v)
         return increment;
 }
 
-#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
-#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))
+#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
+#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
 
-#define atomic_dec_return(v) __atomic_sub(1, (v))
-#define atomic_inc_return(v) __atomic_add(1, (v))
+#define atomic24_dec_return(v) __atomic24_sub(1, (v))
+#define atomic24_inc_return(v) __atomic24_add(1, (v))
 
-#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
-#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)
+#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
+#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
 
-#define atomic_inc(v) ((void)__atomic_add(1, (v)))
-#define atomic_dec(v) ((void)__atomic_sub(1, (v)))
+#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
+#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
 
-#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
+#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()     barrier()
......
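The "31 ... 8 7 ... 0" comment in the hunk above describes how the SMP variant of atomic24_t packs a signed 24-bit value into bits 31..8 of one word, keeping bits 7..0 free for the byte-sized ldstub lock. A small host-side sketch of just that packing arithmetic follows; the helper names pack24/unpack24 are hypothetical, and like the kernel's sra instruction it assumes arithmetic right shift of negative values.

    #include <assert.h>

    /* Bits 31..8 hold the signed counter, bits 7..0 hold the lock byte. */
    static int pack24(int value)
    {
            return value << 8;              /* lock byte left clear, like ATOMIC24_INIT */
    }

    static int unpack24(int word)
    {
            return word >> 8;               /* arithmetic shift recovers the sign, like atomic24_read */
    }

    int main(void)
    {
            int word = pack24(-5);          /* analogous to ATOMIC24_INIT(-5) */

            assert(unpack24(word) == -5);   /* analogous to atomic24_read() */
            return 0;
    }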
+#ifndef _ASM_SPARC_DMA_MAPPING_H
+#define _ASM_SPARC_DMA_MAPPING_H
+
 #include <linux/config.h>
 
 #ifdef CONFIG_PCI
 #include <asm-generic/dma-mapping.h>
-#endif
+#else
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *dma_handle, int flag)
+{
+        BUG();
+        return NULL;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle)
+{
+        BUG();
+}
+
+#endif /* PCI */
+
+#endif /* _ASM_SPARC_DMA_MAPPING_H */
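For context, the coherent DMA API this header forwards to asm-generic/dma-mapping.h (when CONFIG_PCI is set) is consumed roughly as in the sketch below; without PCI the stubs above simply BUG(). The function name example_setup_dma, the pdev argument, and the 4096-byte size are illustrative only.

    /* Hypothetical PCI driver fragment using the coherent DMA API. */
    static int example_setup_dma(struct pci_dev *pdev)
    {
            void *cpu_addr;
            dma_addr_t bus_addr;

            /* cpu_addr is for the CPU, bus_addr is what the device sees. */
            cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &bus_addr, GFP_KERNEL);
            if (!cpu_addr)
                    return -ENOMEM;

            /* ... program the device with bus_addr, touch the buffer via cpu_addr ... */

            dma_free_coherent(&pdev->dev, 4096, cpu_addr, bus_addr);
            return 0;
    }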
@@ -22,7 +22,6 @@
 #include <asm/segment.h>
 #include <asm/btfixup.h>
 #include <asm/page.h>
-#include <asm/atomic.h>
 
 /*
  * Bus types
......
@@ -10,7 +10,7 @@
 #include <linux/rwsem.h>
 
 struct semaphore {
-        atomic_t count;
+        atomic24_t count;
         int sleepers;
         wait_queue_head_t wait;
 #if WAITQUEUE_DEBUG
@@ -40,7 +40,7 @@ struct semaphore {
 static inline void sema_init (struct semaphore *sem, int val)
 {
-        atomic_set(&sem->count, val);
+        atomic24_set(&sem->count, val);
         sem->sleepers = 0;
         init_waitqueue_head(&sem->wait);
 #if WAITQUEUE_DEBUG
@@ -78,7 +78,7 @@ static inline void down(struct semaphore * sem)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_sub\n\t"
+       "call    ___atomic24_sub\n\t"
         " add   %%o7, 8, %%o7\n\t"
         "tst    %%g2\n\t"
         "bl     2f\n\t"
@@ -115,7 +115,7 @@ static inline int down_interruptible(struct semaphore * sem)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_sub\n\t"
+       "call    ___atomic24_sub\n\t"
         " add   %%o7, 8, %%o7\n\t"
         "tst    %%g2\n\t"
         "bl     2f\n\t"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_sub\n\t"
+       "call    ___atomic24_sub\n\t"
         " add   %%o7, 8, %%o7\n\t"
         "tst    %%g2\n\t"
         "bl     2f\n\t"
@@ -193,7 +193,7 @@ static inline void up(struct semaphore * sem)
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
-       "call    ___atomic_add\n\t"
+       "call    ___atomic24_add\n\t"
         " add   %%o7, 8, %%o7\n\t"
         "tst    %%g2\n\t"
         "ble    2f\n\t"
......
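The semaphore's count field now uses the renamed atomic24_t type, but the user-visible API is unchanged. As a reminder, it is consumed roughly as below; the names my_sem, my_resource_init and my_resource_use are hypothetical, only sema_init/down/up come from the header.

    /* Hypothetical user of the sparc32 semaphore API shown above. */
    static struct semaphore my_sem;

    static void my_resource_init(void)
    {
            sema_init(&my_sem, 1);          /* binary semaphore: one holder at a time */
    }

    static void my_resource_use(void)
    {
            down(&my_sem);                  /* may sleep; uses ___atomic24_sub internally */
            /* ... touch the protected resource ... */
            up(&my_sem);                    /* wakes a waiter via ___atomic24_add */
    }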
@@ -171,32 +171,11 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 /*
  * Changing the IRQ level on the Sparc.
  */
-extern __inline__ void setipl(unsigned long __orig_psr)
-{
-        __asm__ __volatile__(
-                "wr     %0, 0x0, %%psr\n\t"
-                "nop; nop; nop\n"
-                : /* no outputs */
-                : "r" (__orig_psr)
-                : "memory", "cc");
-}
-
-extern __inline__ void local_irq_enable(void)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__(
-                "rd     %%psr, %0\n\t"
-                "nop; nop; nop;\n\t"    /* Sun4m + Cypress + SMP bug */
-                "andn   %0, %1, %0\n\t"
-                "wr     %0, 0x0, %%psr\n\t"
-                "nop; nop; nop\n"
-                : "=r" (tmp)
-                : "i" (PSR_PIL)
-                : "memory");
-}
-
-extern __inline__ unsigned long getipl(void)
+extern void local_irq_restore(unsigned long);
+extern unsigned long __local_irq_save(void);
+extern void local_irq_enable(void);
+
+static inline unsigned long getipl(void)
 {
         unsigned long retval;
 
@@ -204,76 +183,11 @@ extern __inline__ unsigned long getipl(void)
         return retval;
 }
 
-#if 0 /* not used */
-extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
-{
-        unsigned long retval;
-
-        __asm__ __volatile__(
-                "rd     %%psr, %0\n\t"
-                "nop; nop; nop;\n\t"    /* Sun4m + Cypress + SMP bug */
-                "and    %0, %2, %%g1\n\t"
-                "and    %1, %2, %%g2\n\t"
-                "xorcc  %%g1, %%g2, %%g0\n\t"
-                "be     1f\n\t"
-                " nop\n\t"
-                "wr     %0, %2, %%psr\n\t"
-                "nop; nop; nop;\n"
-                "1:\n"
-                : "=&r" (retval)
-                : "r" (__new_psr), "i" (PSR_PIL)
-                : "g1", "g2", "memory", "cc");
-        return retval;
-}
-#endif
-
-extern __inline__ unsigned long read_psr_and_cli(void)
-{
-        unsigned long retval;
-
-        __asm__ __volatile__(
-                "rd     %%psr, %0\n\t"
-                "nop; nop; nop;\n\t"    /* Sun4m + Cypress + SMP bug */
-                "or     %0, %1, %%g1\n\t"
-                "wr     %%g1, 0x0, %%psr\n\t"
-                "nop; nop; nop\n\t"
-                : "=r" (retval)
-                : "i" (PSR_PIL)
-                : "g1", "memory");
-        return retval;
-}
-
 #define local_save_flags(flags) ((flags) = getipl())
-#define local_irq_save(flags)   ((flags) = read_psr_and_cli())
-#define local_irq_restore(flags)        setipl((flags))
-#define local_irq_disable()     ((void) read_psr_and_cli())
+#define local_irq_save(flags)   ((flags) = __local_irq_save())
+#define local_irq_disable()     ((void) __local_irq_save())
 #define irqs_disabled()         ((getipl() & PSR_PIL) != 0)
 
-#ifdef CONFIG_SMP
-extern unsigned char global_irq_holder;
-
-#define save_and_cli(flags)   do { save_flags(flags); cli(); } while(0)
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long flags);
-#define cli()                   __global_cli()
-#define sti()                   __global_sti()
-#define save_flags(flags)       ((flags)=__global_save_flags())
-#define restore_flags(flags)    __global_restore_flags(flags)
-
-#else
-
-#define cli()   local_irq_disable()
-#define sti()   local_irq_enable()
-
-#endif
-
 /* XXX Change this if we ever use a PSO mode kernel. */
 #define mb()    __asm__ __volatile__ ("" : : : "memory")
 #define rmb()   mb()
......
@@ -29,8 +29,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)          ((v)->counter)
@@ -39,8 +38,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)         (((v)->counter) = (i))
@@ -49,8 +47,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -65,8 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -83,8 +79,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -101,8 +96,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -116,8 +110,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -133,8 +126,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -153,8 +145,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -174,8 +165,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
......