Commit e6155e6b authored by Hirokazu Takata's avatar Hirokazu Takata Committed by Linus Torvalds

[PATCH] m32r: change to use temporary register variables

I made a patch to upgrade some header files for m32r.

- Change to use temporary register variables allocated by the compiler,
  instead of fixed register variables.
- Change __inline__ to inline.
Signed-off-by: default avatarHirokazu Takata <takata@linux-m32r.org>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 20f3a7cb
This diff is collapsed.
#ifndef _ASM_M32R_SEMAPHORE_H #ifndef _ASM_M32R_SEMAPHORE_H
#define _ASM_M32R_SEMAPHORE_H #define _ASM_M32R_SEMAPHORE_H
/* $Id$ */
#include <linux/linkage.h> #include <linux/linkage.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -10,39 +8,15 @@ ...@@ -10,39 +8,15 @@
/* /*
* SMP- and interrupt-safe semaphores.. * SMP- and interrupt-safe semaphores..
* *
* (C) Copyright 1996 Linus Torvalds * Copyright (C) 1996 Linus Torvalds
* * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
* Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
* the original code and to make semaphore waits
* interruptible so that processes waiting on
* semaphores can be killed.
* Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 * functions in asm/semaphore-helper.h while fixing a
* potential and subtle race discovered by Ulrich Schmid
* in down_interruptible(). Since I started to play here I
* also implemented the `trylock' semaphore operation.
* 1999-07-02 Artur Skawina <skawina@geocities.com>
* Optimized "0(ecx)" -> "(ecx)" (the assembler does not
* do this). Changed calling sequences from push/jmp to
* traditional call/ret.
* Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
* Some hacks to ensure compatibility with recent
* GCC snapshots, to avoid stack corruption when compiling
* with -fomit-frame-pointer. It's not sure if this will
* be fixed in GCC, as our previous implementation was a
* bit dubious.
*
* If you would like to see an analysis of this implementation, please
* ftp to gcom.com and download the file
* /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
*
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <asm/system.h>
#include <asm/atomic.h>
#undef LOAD #undef LOAD
#undef STORE #undef STORE
...@@ -58,21 +32,14 @@ struct semaphore { ...@@ -58,21 +32,14 @@ struct semaphore {
atomic_t count; atomic_t count;
int sleepers; int sleepers;
wait_queue_head_t wait; wait_queue_head_t wait;
#ifdef WAITQUEUE_DEBUG
long __magic;
#endif
}; };
#ifdef WAITQUEUE_DEBUG #define __SEMAPHORE_INITIALIZER(name, n) \
# define __SEM_DEBUG_INIT(name) \ { \
, (int)&(name).__magic .count = ATOMIC_INIT(n), \
#else .sleepers = 0, \
# define __SEM_DEBUG_INIT(name) .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
#endif }
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
#define __MUTEX_INITIALIZER(name) \ #define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1) __SEMAPHORE_INITIALIZER(name,1)
...@@ -83,7 +50,7 @@ struct semaphore { ...@@ -83,7 +50,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static __inline__ void sema_init (struct semaphore *sem, int val) static inline void sema_init (struct semaphore *sem, int val)
{ {
/* /*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
...@@ -94,17 +61,14 @@ static __inline__ void sema_init (struct semaphore *sem, int val) ...@@ -94,17 +61,14 @@ static __inline__ void sema_init (struct semaphore *sem, int val)
atomic_set(&sem->count, val); atomic_set(&sem->count, val);
sem->sleepers = 0; sem->sleepers = 0;
init_waitqueue_head(&sem->wait); init_waitqueue_head(&sem->wait);
#ifdef WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
#endif
} }
static __inline__ void init_MUTEX (struct semaphore *sem) static inline void init_MUTEX (struct semaphore *sem)
{ {
sema_init(sem, 1); sema_init(sem, 1);
} }
static __inline__ void init_MUTEX_LOCKED (struct semaphore *sem) static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{ {
sema_init(sem, 0); sema_init(sem, 0);
} }
...@@ -120,19 +84,15 @@ asmlinkage int __down_trylock(struct semaphore * sem); ...@@ -120,19 +84,15 @@ asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem); asmlinkage void __up(struct semaphore * sem);
/* /*
* This is ugly, but we want the default case to fall through. * Atomically decrement the semaphore's count. If it goes negative,
* "__down_failed" is a special asm handler that calls the C * block the calling thread in the TASK_UNINTERRUPTIBLE state.
* routine that actually waits. See arch/i386/kernel/semaphore.c
*/ */
static __inline__ void down(struct semaphore * sem) static inline void down(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down \n\t" "# down \n\t"
...@@ -140,7 +100,7 @@ static __inline__ void down(struct semaphore * sem) ...@@ -140,7 +100,7 @@ static __inline__ void down(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -149,7 +109,7 @@ static __inline__ void down(struct semaphore * sem) ...@@ -149,7 +109,7 @@ static __inline__ void down(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
__down(sem); __down(sem);
} }
...@@ -157,16 +117,13 @@ static __inline__ void down(struct semaphore * sem) ...@@ -157,16 +117,13 @@ static __inline__ void down(struct semaphore * sem)
* Interruptible try to acquire a semaphore. If we obtained * Interruptible try to acquire a semaphore. If we obtained
* it, return zero. If we were interrupted, returns -EINTR * it, return zero. If we were interrupted, returns -EINTR
*/ */
static __inline__ int down_interruptible(struct semaphore * sem) static inline int down_interruptible(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
int result = 0; int result = 0;
#ifdef WAITQUEUE_DEBUG might_sleep();
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down_interruptible \n\t" "# down_interruptible \n\t"
...@@ -174,7 +131,7 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -174,7 +131,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -183,7 +140,7 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -183,7 +140,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
result = __down_interruptible(sem); result = __down_interruptible(sem);
return result; return result;
...@@ -193,16 +150,12 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -193,16 +150,12 @@ static __inline__ int down_interruptible(struct semaphore * sem)
* Non-blockingly attempt to down() a semaphore. * Non-blockingly attempt to down() a semaphore.
* Returns zero if we acquired it * Returns zero if we acquired it
*/ */
static __inline__ int down_trylock(struct semaphore * sem) static inline int down_trylock(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
int result = 0; int result = 0;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down_trylock \n\t" "# down_trylock \n\t"
...@@ -210,7 +163,7 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -210,7 +163,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -219,7 +172,7 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -219,7 +172,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
result = __down_trylock(sem); result = __down_trylock(sem);
return result; return result;
...@@ -231,14 +184,10 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -231,14 +184,10 @@ static __inline__ int down_trylock(struct semaphore * sem)
* The default case (no contention) will result in NO * The default case (no contention) will result in NO
* jumps for both down() and up(). * jumps for both down() and up().
*/ */
static __inline__ void up(struct semaphore * sem) static inline void up(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
...@@ -247,7 +196,7 @@ static __inline__ void up(struct semaphore * sem) ...@@ -247,7 +196,7 @@ static __inline__ void up(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #1; \n\t" "addi %0, #1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -256,11 +205,10 @@ static __inline__ void up(struct semaphore * sem) ...@@ -256,11 +205,10 @@ static __inline__ void up(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp <= 0) if (unlikely(count <= 0))
__up(sem); __up(sem);
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_M32R_SEMAPHORE_H */ #endif /* _ASM_M32R_SEMAPHORE_H */
#ifndef _ASM_M32R_SPINLOCK_H #ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H #define _ASM_M32R_SPINLOCK_H
/* $Id$ */
/* /*
* linux/include/asm-m32r/spinlock.h * linux/include/asm-m32r/spinlock.h
* orig : i386 2.4.10
* *
* M32R version: * M32R version:
* Copyright (C) 2001, 2002 Hitoshi Yamamoto * Copyright (C) 2001, 2002 Hitoshi Yamamoto
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*/ */
#include <linux/config.h> /* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */ #include <linux/config.h> /* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
...@@ -41,6 +39,9 @@ typedef struct { ...@@ -41,6 +39,9 @@ typedef struct {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
unsigned magic; unsigned magic;
#endif #endif
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
} spinlock_t; } spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead #define SPINLOCK_MAGIC 0xdead4ead
...@@ -66,22 +67,17 @@ typedef struct { ...@@ -66,22 +67,17 @@ typedef struct {
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/* /**
* This works. Despite all the confusion. * _raw_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
* _raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/ */
static inline int _raw_spin_trylock(spinlock_t *lock)
/*======================================================================*
* Try spin lock
*======================================================================*
* Argument:
* arg0: lock
* Return value:
* =1: Success
* =0: Failure
*======================================================================*/
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{ {
int oldval; int oldval;
unsigned long tmp1, tmp2;
/* /*
* lock->lock : =1 : unlock * lock->lock : =1 : unlock
...@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock) ...@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
*/ */
__asm__ __volatile__ ( __asm__ __volatile__ (
"# spin_trylock \n\t" "# spin_trylock \n\t"
"ldi r4, #0; \n\t" "ldi %1, #0; \n\t"
"mvfc r5, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("%0", "r6", "%1") DCACHE_CLEAR("%0", "r6", "%3")
"lock %0, @%1; \n\t" "lock %0, @%3; \n\t"
"unlock r4, @%1; \n\t" "unlock %1, @%3; \n\t"
"mvtc r5, psw; \n\t" "mvtc %2, psw; \n\t"
: "=&r" (oldval) : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&lock->lock) : "r" (&lock->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
...@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock) ...@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
return (oldval > 0); return (oldval > 0);
} }
static __inline__ void _raw_spin_lock(spinlock_t *lock) static inline void _raw_spin_lock(spinlock_t *lock)
{ {
unsigned long tmp0, tmp1;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
__label__ here; __label__ here;
here: here:
...@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock) ...@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
"# spin_lock \n\t" "# spin_lock \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #-1; \n\t" "addi %0, #-1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
"bltz r4, 2f; \n\t" "bltz %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%2; \n\t"
"bgtz r4, 1b; \n\t" "bgtz %0, 1b; \n\t"
"bra 2b; \n\t" "bra 2b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&lock->lock) : "r" (&lock->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_spin_unlock(spinlock_t *lock) static inline void _raw_spin_unlock(spinlock_t *lock)
{ {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(lock->magic != SPINLOCK_MAGIC); BUG_ON(lock->magic != SPINLOCK_MAGIC);
...@@ -184,6 +182,9 @@ typedef struct { ...@@ -184,6 +182,9 @@ typedef struct {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
unsigned magic; unsigned magic;
#endif #endif
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
} rwlock_t; } rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed #define RWLOCK_MAGIC 0xdeaf1eed
...@@ -211,8 +212,10 @@ typedef struct { ...@@ -211,8 +212,10 @@ typedef struct {
*/ */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */ /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
static __inline__ void _raw_read_lock(rwlock_t *rw) static inline void _raw_read_lock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(rw->magic != RWLOCK_MAGIC); BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif #endif
...@@ -231,40 +234,42 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) ...@@ -231,40 +234,42 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
"# read_lock \n\t" "# read_lock \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #-1; \n\t" "addi %0, #-1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
"bltz r4, 2f; \n\t" "bltz %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #1; \n\t" "addi %0, #1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
".fillinsn \n" ".fillinsn \n"
"3: \n\t" "3: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%2; \n\t"
"bgtz r4, 1b; \n\t" "bgtz %0, 1b; \n\t"
"bra 3b; \n\t" "bra 3b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_write_lock(rwlock_t *rw) static inline void _raw_write_lock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1, tmp2;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(rw->magic != RWLOCK_MAGIC); BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif #endif
...@@ -281,85 +286,91 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) ...@@ -281,85 +286,91 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
*/ */
__asm__ __volatile__ ( __asm__ __volatile__ (
"# write_lock \n\t" "# write_lock \n\t"
"seth r5, #high(" RW_LOCK_BIAS_STR "); \n\t" "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
"or3 r5, r5, #low(" RW_LOCK_BIAS_STR "); \n\t" "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r6, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"sub r4, r5; \n\t" "sub %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
"bnez r4, 2f; \n\t" "bnez %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"add r4, r5; \n\t" "add %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
".fillinsn \n" ".fillinsn \n"
"3: \n\t" "3: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%3; \n\t"
"beq r4, r5, 1b; \n\t" "beq %0, %1, 1b; \n\t"
"bra 3b; \n\t" "bra 3b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5", "r6" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r7" , "r7"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_read_unlock(rwlock_t *rw) static inline void _raw_read_unlock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1;
__asm__ __volatile__ ( __asm__ __volatile__ (
"# read_unlock \n\t" "# read_unlock \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #1; \n\t" "addi %0, #1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_write_unlock(rwlock_t *rw) static inline void _raw_write_unlock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1, tmp2;
__asm__ __volatile__ ( __asm__ __volatile__ (
"# write_unlock \n\t" "# write_unlock \n\t"
"seth r5, #high(" RW_LOCK_BIAS_STR "); \n\t" "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
"or3 r5, r5, #low(" RW_LOCK_BIAS_STR "); \n\t" "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
"mvfc r6, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"add r4, r5; \n\t" "add %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5", "r6" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r7" , "r7"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ int _raw_write_trylock(rwlock_t *lock) #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
static inline int _raw_write_trylock(rwlock_t *lock)
{ {
atomic_t *count = (atomic_t *)lock; atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count)) if (atomic_sub_and_test(RW_LOCK_BIAS, count))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment