Commit 94fd582e authored by Richard Henderson

[ALPHA] Use more compiler builtins instead of inline assembly.

parent 7ae4323c
@@ -264,13 +264,11 @@ static inline unsigned long ffz(unsigned long word)
 {
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 	/* Whee. EV67 can calculate it directly. */
-	unsigned long result;
-	__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
-	return result;
+	return __kernel_cttz(~word);
 #else
 	unsigned long bits, qofs, bofs;
-	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
+	bits = __kernel_cmpbge(word, ~0UL);
 	qofs = ffz_b(bits);
 	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(bits);
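
For readers without an Alpha manual at hand: `cmpbge` compares each byte of its first operand against the corresponding byte of its second and returns an 8-bit mask, so comparing `word` against `~0UL` flags exactly the all-ones bytes; `ffz_b` then locates the first non-flagged byte and the zero bit within it. Below is a rough C model of this fallback path, with `model_*` helpers as illustrative stand-ins for the real instructions (not the kernel's definitions):

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for the CMPBGE and EXTBL instructions. */
static uint64_t model_cmpbge(uint64_t a, uint64_t b)
{
	uint64_t mask = 0;
	for (int i = 0; i < 8; i++)
		if (((a >> i * 8) & 0xff) >= ((b >> i * 8) & 0xff))
			mask |= 1ul << i;
	return mask;
}

static uint64_t model_extbl(uint64_t val, unsigned shift)
{
	return (val >> ((shift & 7) * 8)) & 0xff;
}

/* First zero bit in a byte-wide value; plays the role of ffz_b here. */
static unsigned model_ffz_b(uint64_t x)
{
	unsigned bit = 0;
	while (x & 1) {
		x >>= 1;
		bit++;
	}
	return bit;
}

static unsigned long model_ffz(unsigned long word)
{
	uint64_t bits = model_cmpbge(word, ~0ul);	/* 1 = byte is 0xff */
	unsigned qofs = model_ffz_b(bits);		/* first non-0xff byte */
	unsigned bofs = model_ffz_b(model_extbl(word, qofs));
	return qofs * 8 + bofs;
}

int main(void)
{
	assert(model_ffz(0xfffffffffffffffeul) == 0);
	assert(model_ffz(0x00fffffffffffffful) == 56);
	return 0;
}
```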
@@ -286,13 +284,11 @@ static inline unsigned long __ffs(unsigned long word)
 {
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 	/* Whee. EV67 can calculate it directly. */
-	unsigned long result;
-	__asm__("cttz %1,%0" : "=r"(result) : "r"(word));
-	return result;
+	return __kernel_cttz(word);
 #else
 	unsigned long bits, qofs, bofs;
-	__asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
+	bits = __kernel_cmpbge(0, word);
 	qofs = ffz_b(bits);
 	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(~bits);
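
Note the argument order in the fallback: `__kernel_cmpbge(0, word)` reproduces the old `cmpbge $31,%1,%0` and flags the zero bytes of `word` (the `rJ` constraint lets the literal 0 become the zero register `$31`), whereas `ffz` above compares against `~0UL` to flag the all-ones bytes. On the EV67 path `__ffs` is a plain count-trailing-zeros; a quick hedged sanity check of that equivalence against a naive loop:

```c
#include <assert.h>

/* Reference: index of the lowest set bit (x must be nonzero). */
static int naive_lowest_set(unsigned long x)
{
	int bit = 0;
	while (!(x & 1)) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long samples[] = { 1, 2, 0x80, 0xf0f0, 1ul << 63 };
	for (unsigned i = 0; i < sizeof samples / sizeof *samples; i++)
		assert(__builtin_ctzl(samples[i]) == naive_lowest_set(samples[i]));
	return 0;
}
```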
@@ -311,8 +307,8 @@ static inline unsigned long __ffs(unsigned long word)
 static inline int ffs(int word)
 {
-	int result = __ffs(word);
-	return word ? result+1 : 0;
+	int result = __ffs(word) + 1;
+	return word ? result : 0;
 }
 
 /*
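
The `ffs` change only hoists the `+ 1` out of the conditional: the addition is computed unconditionally and its result simply discarded when `word` is 0, which maps nicely onto a conditional move. The contract stays the traditional 1-based one with `ffs(0) == 0`; a small hedged sketch of that semantics, using `__builtin_ctz` as a stand-in for `__ffs`:

```c
#include <assert.h>

/* 1-based find-first-set with ffs(0) == 0, as the kernel routine defines. */
static int model_ffs(int word)
{
	int result = (word ? __builtin_ctz(word) : 0) + 1;
	return word ? result : 0;
}

int main(void)
{
	assert(model_ffs(0) == 0);
	assert(model_ffs(1) == 1);
	assert(model_ffs(0x40) == 7);
	return 0;
}
```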
@@ -321,9 +317,7 @@ static inline int ffs(int word)
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 static inline int fls(int word)
 {
-	long result;
-	__asm__("ctlz %1,%0" : "=r"(result) : "r"(word & 0xffffffff));
-	return 64 - result;
+	return 64 - __kernel_ctlz(word & 0xffffffff);
 }
 #else
 #define fls generic_fls
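
`fls` is the 1-based index of the highest set bit. Masking with `0xffffffff` zero-extends the argument into the 64-bit register, `ctlz` counts leading zeros over all 64 bits, and `64 - ctlz` lands on the 32-bit answer; the hardware returns 64 for `ctlz(0)`, which makes `fls(0) == 0` fall out for free. A hedged model (the builtin's clz is undefined at 0, so that case is handled explicitly here):

```c
#include <assert.h>

static int model_fls(int word)
{
	unsigned long long w = (unsigned int)word;	/* zero-extend */
	return w ? 64 - __builtin_clzll(w) : 0;		/* ctlz(0) == 64 */
}

int main(void)
{
	assert(model_fls(0) == 0);
	assert(model_fls(1) == 1);
	assert(model_fls(0x80000000) == 32);
	return 0;
}
```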
@@ -332,11 +326,10 @@ static inline int fls(int word)
 /* Compute powers of two for the given integer. */
 static inline int floor_log2(unsigned long word)
 {
-	long bit;
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
-	__asm__("ctlz %1,%0" : "=r"(bit) : "r"(word));
-	return 63 - bit;
+	return 63 - __kernel_ctlz(word);
 #else
+	long bit;
 	for (bit = -1; word ; bit++)
 		word >>= 1;
 	return bit;
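
`floor_log2` is the same trick over the full register: with `ctlz` counting down from bit 63, `63 - ctlz(word)` is the index of the highest set bit, i.e. ⌊log2 word⌋; the rewrite also moves the `bit` declaration into the only branch that still uses it. A quick hedged check of the builtin against the loop fallback kept in the `#else` branch:

```c
#include <assert.h>

static int loop_floor_log2(unsigned long word)
{
	long bit;
	for (bit = -1; word; bit++)	/* the header's portable fallback */
		word >>= 1;
	return bit;
}

int main(void)
{
	for (unsigned long w = 1; w < 4096; w++)
		assert(63 - __builtin_clzl(w) == loop_floor_log2(w));
	return 0;
}
```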
@@ -358,9 +351,7 @@ static inline int ceil_log2(unsigned int word)
 /* Whee. EV67 can calculate it directly. */
 static inline unsigned long hweight64(unsigned long w)
 {
-	unsigned long result;
-	__asm__("ctpop %1,%0" : "=r"(result) : "r"(w));
-	return result;
+	return __kernel_ctpop(w);
 }
 #define hweight32(x) hweight64((x) & 0xfffffffful)
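
`hweight64` is a population count, which `ctpop` (and `__builtin_popcountl`) computes in one instruction on EV67; non-CIX configurations presumably keep the generic routine. For comparison only, a hedged portable equivalent using the classic SWAR reduction:

```c
#include <assert.h>
#include <stdint.h>

/* Classic SWAR popcount; equivalent in effect to a single CTPOP. */
static unsigned swar_popcount64(uint64_t w)
{
	w = w - ((w >> 1) & 0x5555555555555555ull);
	w = (w & 0x3333333333333333ull) + ((w >> 2) & 0x3333333333333333ull);
	w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0full;
	return (w * 0x0101010101010101ull) >> 56;
}

int main(void)
{
	assert(swar_popcount64(0) == 0);
	assert(swar_popcount64(~0ull) == 64);
	assert(swar_popcount64(0x8000000000000001ull) == 2);
	return 0;
}
```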
@@ -415,11 +406,11 @@ find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
 	if (!size)
 		return result;
 	tmp = *p;
 found_first:
 	tmp |= ~0UL << size;
 	if (tmp == ~0UL)	/* Are any bits zero? */
 		return result + size;	/* Nope. */
 found_middle:
 	return result + ffz(tmp);
 }
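
The interesting line in this (otherwise untouched) tail of `find_next_zero_bit` is `tmp |= ~0UL << size`: it forces every bit at or above `size` to 1, so `ffz` cannot report a zero past the end of the bitmap, and the `tmp == ~0UL` test catches the not-found case. A tiny sketch of the mask at work:

```c
#include <assert.h>

int main(void)
{
	unsigned long tmp = 0xfful;	/* zero bits start at bit 8... */
	unsigned long size = 4;		/* ...but only bits 0..3 are valid */

	tmp |= ~0ul << size;		/* force bits >= size to 1 */
	assert(tmp == ~0ul);		/* no zero bit within range */
	return 0;
}
```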
@@ -456,11 +447,11 @@ find_next_bit(void * addr, unsigned long size, unsigned long offset)
 	if (!size)
 		return result;
 	tmp = *p;
 found_first:
 	tmp &= ~0UL >> (64 - size);
 	if (!tmp)
 		return result + size;
 found_middle:
 	return result + __ffs(tmp);
 }
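
`find_next_bit` uses the mirror-image mask: `~0UL >> (64 - size)` keeps only the low `size` bits, so a stray set bit beyond the bitmap's end is never reported. Sketch:

```c
#include <assert.h>

int main(void)
{
	unsigned long tmp = 1ul << 10;	/* set bit beyond the bitmap */
	unsigned long size = 4;		/* only bits 0..3 are valid */

	tmp &= ~0ul >> (64 - size);	/* keep only the low size bits */
	assert(tmp == 0);		/* stray bit masked away */
	return 0;
}
```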
...
@@ -2,6 +2,7 @@
 #define _ALPHA_BYTEORDER_H
 
 #include <asm/types.h>
+#include <asm/compiler.h>
 
 #ifdef __GNUC__
@@ -23,11 +24,8 @@ static __inline __u32 __attribute__((__const)) __arch__swab32(__u32 x)
 	__u64 t0, t1, t2, t3;
 
-	__asm__("inslh %1, 7, %0"		/* t0 : 0000000000AABBCC */
-		: "=r"(t0) : "r"(x));
-	__asm__("inswl %1, 3, %0"		/* t1 : 000000CCDD000000 */
-		: "=r"(t1) : "r"(x));
-
+	t0 = __kernel_inslh(x, 7);	/* t0 : 0000000000AABBCC */
+	t1 = __kernel_inswl(x, 3);	/* t1 : 000000CCDD000000 */
 	t1 |= t0;			/* t1 : 000000CCDDAABBCC */
 	t2 = t1 >> 16;			/* t2 : 0000000000CCDDAA */
 	t0 = t1 & 0xFF00FF00;		/* t0 : 00000000DD00BB00 */
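
`inslh` and `inswl` stage the four bytes so that ordinary shifts and masks can finish the swap. Below is a hedged portable model of the whole shuffle for `x = 0xAABBCCDD`; the `model_*` helpers are illustrative stand-ins, and the last two steps (not visible in this hunk) follow the pattern the byte-position comments establish:

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins: INSLH keeps the high half of a 128-bit left
   shift by shift*8; INSWL shifts the low 16 bits up by shift*8. */
static uint64_t model_inslh(uint64_t x, int shift)
{
	return x >> ((8 - shift) * 8);
}

static uint64_t model_inswl(uint64_t x, int shift)
{
	return (x & 0xffff) << (shift * 8);
}

static uint32_t model_swab32(uint32_t x)
{
	uint64_t t0, t1, t2, t3;

	t0 = model_inslh(x, 7);		/* 0000000000AABBCC */
	t1 = model_inswl(x, 3);		/* 000000CCDD000000 */
	t1 |= t0;			/* 000000CCDDAABBCC */
	t2 = t1 >> 16;			/* 0000000000CCDDAA */
	t0 = t1 & 0xFF00FF00;		/* 00000000DD00BB00 */
	t3 = t2 & 0x00FF00FF;		/* 0000000000CC00AA */
	return t0 + t3;			/* DDCCBBAA */
}

int main(void)
{
	assert(model_swab32(0xAABBCCDD) == 0xDDCCBBAA);
	return 0;
}
```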
...
@@ -9,40 +9,58 @@
  * these tests and macros.
  */
 
-#if 0
-#define __kernel_insbl(val, shift) \
-  (((unsigned long)(val) & 0xfful) << ((shift) * 8))
-#define __kernel_inswl(val, shift) \
-  (((unsigned long)(val) & 0xfffful) << ((shift) * 8))
-#define __kernel_insql(val, shift) \
-  ((unsigned long)(val) << ((shift) * 8))
+#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_insbl(val, shift)	__builtin_alpha_insbl(val, shift)
+# define __kernel_inswl(val, shift)	__builtin_alpha_inswl(val, shift)
+# define __kernel_insql(val, shift)	__builtin_alpha_insql(val, shift)
+# define __kernel_inslh(val, shift)	__builtin_alpha_inslh(val, shift)
+# define __kernel_extbl(val, shift)	__builtin_alpha_extbl(val, shift)
+# define __kernel_extwl(val, shift)	__builtin_alpha_extwl(val, shift)
+# define __kernel_cmpbge(a, b)		__builtin_alpha_cmpbge(a, b)
+# define __kernel_cttz(x)		__builtin_ctzl(x)
+# define __kernel_ctlz(x)		__builtin_clzl(x)
+# define __kernel_ctpop(x)		__builtin_popcountl(x)
 #else
-#define __kernel_insbl(val, shift) \
+# define __kernel_insbl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_inswl(val, shift) \
+# define __kernel_inswl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_insql(val, shift) \
+# define __kernel_insql(val, shift) \
   ({ unsigned long __kir; \
      __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#endif
-
-#if 0 && (__GNUC__ > 2 || __GNUC_MINOR__ >= 92)
-#define __kernel_extbl(val, shift) (((val) >> (((shift) & 7) * 8)) & 0xfful)
-#define __kernel_extwl(val, shift) (((val) >> (((shift) & 7) * 8)) & 0xfffful)
-#else
-#define __kernel_extbl(val, shift) \
+# define __kernel_inslh(val, shift) \
+  ({ unsigned long __kir; \
+     __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+     __kir; })
+# define __kernel_extbl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
-#define __kernel_extwl(val, shift) \
+# define __kernel_extwl(val, shift) \
   ({ unsigned long __kir; \
      __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
      __kir; })
+# define __kernel_cmpbge(a, b) \
+  ({ unsigned long __kir; \
+     __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
+     __kir; })
+# define __kernel_cttz(x) \
+  ({ unsigned long __kir; \
+     __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
+# define __kernel_ctlz(x) \
+  ({ unsigned long __kir; \
+     __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
+# define __kernel_ctpop(x) \
+  ({ unsigned long __kir; \
+     __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+     __kir; })
 #endif
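
On GCC 3.4 and later the macros become thin wrappers over compiler builtins, which gives the optimizer visibility the asm blocks never had (constant folding, scheduling, dead-code elimination). The byte insert/extract semantics are easy to state in portable C; this hedged sketch mirrors the `#if 0` portable forms the change deletes:

```c
#include <assert.h>

/* insbl: place the low byte of val at byte position shift;
   extbl: pull the byte at position shift back out. */
#define model_insbl(val, shift) \
	(((unsigned long)(val) & 0xfful) << ((shift) * 8))
#define model_extbl(val, shift) \
	(((unsigned long)(val) >> (((shift) & 7) * 8)) & 0xfful)

int main(void)
{
	unsigned long q = model_insbl(0xab, 3);
	assert(q == 0xab000000ul);
	assert(model_extbl(q, 3) == 0xab);
	return 0;
}
```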
...