Commit 74a0ba61 authored by Kumar Gala

[POWERPC] Move inline asm eieio to using eieio inline function

Use the eieio() function so we can redefine what eieio does, rather
than using direct inline asm.  This is partly a code cleanup and partly
because not all PPCs have eieio (Book-E has mbar, which maps to eieio).
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
parent cef1a3a5
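
For reference, the eieio() wrapper that the call sites below switch to is just an inline function around the same instruction. A minimal sketch, assuming a static inline definition (the exact form and header in the tree may differ):

	/*
	 * Sketch only: the real eieio() definition lives in the powerpc
	 * barrier/system headers and may differ in detail.
	 */
	static inline void eieio(void)
	{
		__asm__ __volatile__ ("eieio" : : : "memory");
	}

Keeping the instruction behind one function means a Book-E build can later substitute an mbar-based body without touching any caller.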
@@ -35,7 +35,7 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -66,7 +66,7 @@ void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -97,7 +97,7 @@ void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -155,21 +155,21 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
 	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
 	}
 	while(n > 4) {
 		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc += 4;
 		dest += 4;
 		n -= 4;
 	}
 	while(n) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
......
@@ -163,7 +163,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
-	__asm__ __volatile__ ("eieio" : : : "memory");
+	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
......
@@ -55,7 +55,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	for (entry = 0; entry < 8; entry++, ste++) {
 		if (!(ste->esid_data & STE_ESID_V)) {
 			ste->vsid_data = vsid_data;
-			asm volatile("eieio":::"memory");
+			eieio();
 			ste->esid_data = esid_data;
 			return (global_entry | entry);
 		}
@@ -101,7 +101,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	asm volatile("sync" : : : "memory");	/* Order update */
 	castout_ste->vsid_data = vsid_data;
-	asm volatile("eieio" : : : "memory");	/* Order update */
+	eieio();				/* Order update */
 	castout_ste->esid_data = esid_data;
 	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
......
@@ -43,7 +43,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
......
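
Since smp_wmb() now expands to eieio(), a Book-E configuration only has to redefine the one wrapper to pick up the right instruction everywhere. A hypothetical per-family definition (the CONFIG_BOOKE switch and the mbar spelling here are illustrative assumptions, not taken from this commit):

	#ifdef CONFIG_BOOKE
	/* Book-E has no eieio; mbar provides the equivalent ordering here. */
	static inline void eieio(void)
	{
		__asm__ __volatile__ ("mbar" : : : "memory");
	}
	#else
	static inline void eieio(void)
	{
		__asm__ __volatile__ ("eieio" : : : "memory");
	}
	#endif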