Commit d6bb7a1a authored by Mark Salter, committed by Linus Torvalds

mn10300: add cc clobbers to asm statements

gcc 4.2.1 for MN10300 is more aggressive than the older gcc in
reordering/moving other insns between an insn that sets the condition
flags and an insn that uses those flags.  This leads to trouble with
asm statements that are missing an explicit "cc" clobber.  This patch
adds the explicit "cc" clobber to the asm statements that do indeed
clobber the condition flags.
Signed-off-by: Mark Salter <msalter@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b0641e86
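
For reference, every hunk below applies the same pattern: appending a
"cc" clobber so GCC knows the asm modifies the condition flags.  A
minimal sketch, adapted from the misalignment hunk at the end of the
diff (the sign_extend_disp8() wrapper is illustrative, not part of the
patch):

	/* Without the "cc" clobber, GCC may assume the MN10300
	 * condition flags in EPSW survive this asm and schedule a
	 * flag-consuming insn across it.
	 */
	static inline long sign_extend_disp8(long tmp)
	{
		/* "asr 8" arithmetically shifts %0 right by 8 and sets
		 * the condition flags as a side effect, so "cc" lists
		 * them as clobbered.
		 */
		asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
		return tmp;
	}

Where an asm also reads or writes memory, the existing "memory"
clobber is kept alongside the new "cc".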
@@ -165,7 +165,7 @@ static inline __attribute__((const))
 unsigned long __ffs(unsigned long x)
 {
 	int bit;
-	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x));
+	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc");
 	return bit;
 }
@@ -177,7 +177,7 @@ static inline __attribute__((const))
 int __ilog2_u32(u32 n)
 {
 	int bit;
-	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n));
+	asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc");
 	return bit;
 }
@@ -72,6 +72,7 @@ unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
 	 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
+	    : "cc"
	    );
 	return result;
@@ -92,6 +93,7 @@ signed __muldiv64s(signed val, signed mult, signed div)
 	 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
+	    : "cc"
	    );
 	return result;
@@ -143,6 +143,7 @@ do { \
 	"	mov %0,epsw	\n"				\
 	: "=&d"(tmp)						\
 	: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)	\
+	: "cc"							\
 	);							\
 } while (0)
@@ -22,7 +22,7 @@ do { \
 	"	mov %0,%1	\n"				\
 	: "=d"(w)						\
 	: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)		\
-	: "memory"						\
+	: "cc", "memory"					\
 	);							\
 } while (0)
@@ -316,7 +316,7 @@ do { \
 	"	.previous\n"					\
 	: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)	\
 	: "0"(__from), "1"(__to), "2"(size)			\
-	: "memory");						\
+	: "cc", "memory");					\
 	}							\
 } while (0)
@@ -352,7 +352,7 @@ do { \
 	"	.previous\n"					\
 	: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)	\
 	: "0"(__from), "1"(__to), "2"(size)			\
-	: "memory");						\
+	: "cc", "memory");					\
 	}							\
 } while (0)
@@ -380,7 +380,8 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask)
 	u32 epsw;
 	asm volatile("	bclr %1,(%2)	\n"
 		     "	mov epsw,%0	\n"
-		     : "=d"(epsw) : "d"(mask), "a"(ptr));
+		     : "=d"(epsw) : "d"(mask), "a"(ptr)
+		     : "cc", "memory");
 	return !(epsw & EPSW_FLAG_Z);
 }
@@ -22,6 +22,7 @@ static inline unsigned short from32to16(__wsum sum)
 	"	addc	0xffff,%0	\n"
	    : "=r" (sum)
	    : "r" (sum << 16), "0" (sum & 0xffff0000)
+	    : "cc"
	    );
 	return sum >> 16;
 }
@@ -28,7 +28,8 @@ void __delay(unsigned long loops)
 	"2:	add	-1,%0	\n"
 	"	bne	2b	\n"
 	: "=&d" (d0)
-	: "0" (loops));
+	: "0" (loops)
+	: "cc");
 }
 EXPORT_SYMBOL(__delay);
@@ -62,7 +62,7 @@ do { \
 	"	.previous"					\
 	:"=&r"(res), "=r"(count), "=&r"(w)			\
 	:"i"(-EFAULT), "1"(count), "a"(src), "a"(dst)		\
-	:"memory");						\
+	: "memory", "cc");					\
 } while (0)

 long
@@ -109,7 +109,7 @@ do { \
 	".previous\n"						\
 	: "+r"(size), "=&r"(w)					\
 	: "a"(addr), "d"(0)					\
-	: "memory");						\
+	: "memory", "cc");					\
 } while (0)

 unsigned long
@@ -161,6 +161,6 @@ long strnlen_user(const char *s, long n)
 	".previous\n"
 	:"=d"(res), "=&r"(w)
 	:"0"(0), "a"(s), "r"(n)
-	:"memory");
+	: "memory", "cc");
 	return res;
 }
@@ -633,13 +633,13 @@ static int misalignment_addr(unsigned long *registers, unsigned long sp,
 		goto displace_or_inc;
 	case SD24:
 		tmp = disp << 8;
-		asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
+		asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
 		disp = (long) tmp;
 		goto displace_or_inc;
 	case SIMM4_2:
 		tmp = opcode >> 4 & 0x0f;
 		tmp <<= 28;
-		asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
+		asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc");
 		disp = (long) tmp;
 		goto displace_or_inc;
 	case IMM8: