Commit c8075847 authored by Richard Henderson

Update Alpha SMP for the new scheduler and preempt API change.

parent 8a3fb763
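The "preempt API change" referred to here is the split of the locking primitives into generic, preempt-aware wrappers and per-architecture _raw_ implementations. A rough sketch of how the generic layer (include/linux/spinlock.h) builds on the renamed Alpha primitives below; this is a simplification, not the exact kernel macros:

	/* simplified sketch of the generic wrappers, not the exact kernel macros */
	#define spin_lock(lock)			\
	do {					\
		preempt_disable();		\
		_raw_spin_lock(lock);		\
	} while (0)

	#define spin_unlock(lock)		\
	do {					\
		_raw_spin_unlock(lock);		\
		preempt_enable();		\
	} while (0)

Hence every spin/rwlock entry point in the Alpha headers is renamed to its _raw_ counterpart in this commit.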
This diff is collapsed.
@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
+#include <asm/compiler.h>
 /*
  * Copyright 1994, Linus Torvalds.
@@ -60,14 +61,14 @@ clear_bit(unsigned long nr, volatile void * addr)
 	__asm__ __volatile__(
 	"1: ldl_l %0,%3\n"
-	" and %0,%2,%0\n"
+	" bic %0,%2,%0\n"
 	" stl_c %0,%1\n"
 	" beq %0,2f\n"
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (*m)
-	:"Ir" (~(1UL << (nr & 31))), "m" (*m));
+	:"Ir" (1UL << (nr & 31)), "m" (*m));
 }
 /*
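In clear_bit() the complement moves from C into the instruction: the old code handed the asm a precomputed ~mask and used and, the new code hands it the plain mask and uses bic (AND-NOT). A tiny equivalence sketch in plain C; the helper names are illustrative only, not kernel code:

	/* illustrative helpers, not part of the kernel */
	unsigned long clear_with_and(unsigned long word, unsigned long nr)
	{
		return word & ~(1UL << (nr & 31));	/* old: "and" with a precomputed ~mask */
	}

	unsigned long clear_with_bic(unsigned long word, unsigned long nr)
	{
		unsigned long mask = 1UL << (nr & 31);
		return word & ~mask;			/* new: what "bic word,mask" computes */
	}

Passing the uncomplemented mask presumably also lets small constant bit numbers fit the instruction's 8-bit literal ("I") operand.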
@@ -246,12 +247,15 @@ test_bit(int nr, volatile void * addr)
  */
 static inline unsigned long ffz_b(unsigned long x)
 {
-	unsigned long sum = 0;
+	unsigned long sum, x1, x2, x4;
 	x = ~x & -~x;	/* set first 0 bit, clear others */
-	if (x & 0xF0) sum += 4;
-	if (x & 0xCC) sum += 2;
-	if (x & 0xAA) sum += 1;
+	x1 = x & 0xAA;
+	x2 = x & 0xCC;
+	x4 = x & 0xF0;
+	sum = x2 ? 2 : 0;
+	sum += (x4 != 0) * 4;
+	sum += (x1 != 0);
 	return sum;
 }
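The reworked ffz_b() computes the three byte masks into independent temporaries and sums their truth values, instead of chaining conditional additions into sum, which gives the compiler more freedom to schedule the tests in parallel. A standalone sanity check of the new logic against a naive bit loop; naive_ffz_b() and the test harness are mine, not kernel code:

	#include <assert.h>

	/* copy of the reworked kernel function */
	static unsigned long ffz_b(unsigned long x)
	{
		unsigned long sum, x1, x2, x4;

		x = ~x & -~x;	/* set first 0 bit, clear others */
		x1 = x & 0xAA;
		x2 = x & 0xCC;
		x4 = x & 0xF0;
		sum = x2 ? 2 : 0;
		sum += (x4 != 0) * 4;
		sum += (x1 != 0);
		return sum;
	}

	/* brute-force reference, not kernel code */
	static unsigned long naive_ffz_b(unsigned long x)
	{
		unsigned long i;

		for (i = 0; i < 8; i++)
			if (!(x & (1UL << i)))
				return i;
		return 8;	/* no zero bit in the low byte */
	}

	int main(void)
	{
		unsigned long b;

		/* 0xff has no zero bit and is never passed by the callers */
		for (b = 0; b < 0xff; b++)
			assert(ffz_b(b) == naive_ffz_b(b));
		return 0;
	}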
@@ -268,7 +272,7 @@ static inline unsigned long ffz(unsigned long word)
 	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
 	qofs = ffz_b(bits);
-	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(bits);
 	return qofs*8 + bofs;
@@ -290,7 +294,7 @@ static inline unsigned long __ffs(unsigned long word)
 	__asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
 	qofs = ffz_b(bits);
-	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(~bits);
 	return qofs*8 + bofs;
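Both ffz() and __ffs() now call __kernel_extbl() from the newly included <asm/compiler.h> instead of open-coding the extbl instruction, so the byte extraction is expressed in one place (and can become a compiler builtin where available). The helper's definition is not part of this diff; assuming the plain inline-asm form, it would look roughly like:

	/* sketch only: the real definition lives in <asm/compiler.h> */
	#define __kernel_extbl(val, shift)				\
	({								\
		unsigned long __kir;					\
		__asm__("extbl %2,%1,%0"				\
			: "=r"(__kir) : "rI"(shift), "r"(val));		\
		__kir;							\
	})

which extracts the byte at byte offset shift of val into the low 8 bits, exactly as the removed asm did.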
@@ -349,6 +353,14 @@ static inline unsigned long hweight64(unsigned long w)
 #define hweight16(x) hweight64((x) & 0xfffful)
 #define hweight8(x) hweight64((x) & 0xfful)
 #else
+static inline unsigned long hweight64(unsigned long w)
+{
+	unsigned long result;
+	for (result = 0; w ; w >>= 1)
+		result += (w & 1);
+	return result;
+}
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
...
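There is no generic_hweight64() helper to fall back on, so the non-CIX branch gains a simple shift-and-add population count that loops once per bit up to the highest set bit. A quick self-check; the example values are mine, and it assumes a 64-bit unsigned long as on Alpha:

	#include <assert.h>

	/* copy of the added fallback; assumes 64-bit unsigned long (true on Alpha) */
	static unsigned long hweight64(unsigned long w)
	{
		unsigned long result;
		for (result = 0; w ; w >>= 1)
			result += (w & 1);
		return result;
	}

	int main(void)
	{
		assert(hweight64(0x0000000000000000ul) ==  0);
		assert(hweight64(0x00000000000000fful) ==  8);
		assert(hweight64(0x8000000000000001ul) ==  2);
		assert(hweight64(0xfffffffffffffffful) == 64);
		return 0;
	}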
@@ -38,12 +38,12 @@ typedef struct {
 #define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
 #if CONFIG_DEBUG_SPINLOCK
-extern void spin_unlock(spinlock_t * lock);
+extern void _raw_spin_unlock(spinlock_t * lock);
 extern void debug_spin_lock(spinlock_t * lock, const char *, int);
 extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
 #define spin_lock_own(LOCK, LOCATION) \
 do { \
@@ -54,13 +54,13 @@ do { \
 	   (LOCK)->lock ? "taken" : "freed", (LOCK)->on_cpu); \
 } while (0)
 #else
-static inline void spin_unlock(spinlock_t * lock)
+static inline void _raw_spin_unlock(spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
-static inline void spin_lock(spinlock_t * lock)
+static inline void _raw_spin_lock(spinlock_t * lock)
 {
 	long tmp;
@@ -83,7 +83,11 @@ static inline void spin_lock(spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	return !test_and_set_bit(0, &lock->lock);
+}
 #define spin_lock_own(LOCK, LOCATION) ((void)0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
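spin_trylock also stops being a macro over a raw bit address and becomes a typed inline, _raw_spin_trylock(), which the generic preempt-aware wrapper can build on. Roughly, again simplified rather than quoted from include/linux/spinlock.h:

	/* simplified sketch of the generic wrapper, not the exact kernel macro */
	#define spin_trylock(lock)			\
	({						\
		preempt_disable();			\
		_raw_spin_trylock(lock)			\
			? 1				\
			: ({ preempt_enable(); 0; });	\
	})

so a failed trylock re-enables preemption immediately, while a successful one keeps it disabled until the matching spin_unlock().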
@@ -98,10 +102,10 @@ typedef struct {
 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 #if CONFIG_DEBUG_RWLOCK
-extern void write_lock(rwlock_t * lock);
-extern void read_lock(rwlock_t * lock);
+extern void _raw_write_lock(rwlock_t * lock);
+extern void _raw_read_lock(rwlock_t * lock);
 #else
-static inline void write_lock(rwlock_t * lock)
+static inline void _raw_write_lock(rwlock_t * lock)
 {
 	long regx;
@@ -121,7 +125,7 @@ static inline void write_lock(rwlock_t * lock)
 	: "0" (*(volatile int *)lock) : "memory");
 }
-static inline void read_lock(rwlock_t * lock)
+static inline void _raw_read_lock(rwlock_t * lock)
 {
 	long regx;
@@ -142,13 +146,13 @@ static inline void read_lock(rwlock_t * lock)
 }
 #endif /* CONFIG_DEBUG_RWLOCK */
-static inline void write_unlock(rwlock_t * lock)
+static inline void _raw_write_unlock(rwlock_t * lock)
 {
 	mb();
 	*(volatile int *)lock = 0;
 }
-static inline void read_unlock(rwlock_t * lock)
+static inline void _raw_read_unlock(rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(
...
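The rwlock renames complete the same split for reader/writer locks. A compressed sketch of the generic wrappers these feed into, simplified in the same way as the spin_lock sketch near the top:

	/* simplified sketch of the generic wrappers, not the exact kernel macros */
	#define read_lock(lock)		do { preempt_disable(); _raw_read_lock(lock); } while (0)
	#define read_unlock(lock)	do { _raw_read_unlock(lock); preempt_enable(); } while (0)
	#define write_lock(lock)	do { preempt_disable(); _raw_write_lock(lock); } while (0)
	#define write_unlock(lock)	do { _raw_write_unlock(lock); preempt_enable(); } while (0)

Callers keep using the unprefixed names; only the architecture-internal entry points change in this diff.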