Commit 15cba23e authored by Ian Munsie, committed by Benjamin Herrenschmidt

powerpc: Support endian agnostic MMIO

This patch maps the MMIO functions for 32bit PowerPC to their
appropriate instructions depending on CPU endianness.

The macros used to create the corresponding inline functions are also
renamed by this patch. Previously they had BE or LE in their names which
was misleading - they had nothing to do with endianness, but actually
created different instruction forms so their new names reflect the
instruction form they are creating (D-Form and X-Form).

Little endian 64bit PowerPC is not supported, so the lack of mappings
(and corresponding breakage) for that case is intentional, to bring it to
the attention of anyone doing a 64bit little endian port. 64bit big endian
is unaffected.
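
For context on the new macro names: a D-form PowerPC load/store encodes a base
register plus an immediate displacement (e.g. lwz, stw), while an X-form
instruction addresses memory through two registers, base plus index. The
byte-reversing accesses (lhbrx, lwbrx, sthbrx, stwbrx) exist only in X-form,
which is why the cross-endian accessors need the X-form macro. The sketch
below is illustrative only and is not part of the patch: the example_* names
are invented, and the sync barriers the real macros emit (the "sync;" prefix
visible in the diff, among others) are omitted. It shows the two addressing
patterns on a 32-bit load, using the same operand constraints as the macros
in the diff.

#include <stdint.h>

/*
 * Illustrative sketch only -- not the kernel macros.  example_in_native32
 * follows the D-form pattern (displacement addressing through the "m"
 * operand and the %U/%X modifiers), example_in_swapped32 follows the
 * X-form pattern (indexed addressing through the "Z" operand and %y,
 * required because lwbrx has no D-form encoding).
 */
static inline uint32_t example_in_native32(const volatile uint32_t *addr)
{
	uint32_t ret;

	/* D-form: lwz rD,disp(rA) */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r" (ret) : "m" (*addr) : "memory");
	return ret;
}

static inline uint32_t example_in_swapped32(const volatile uint32_t *addr)
{
	uint32_t ret;

	/* X-form: lwbrx rD,rA,rB -- byte-reversed load */
	__asm__ __volatile__("lwbrx %0,%y1" : "=r" (ret) : "Z" (*addr) : "memory");
	return ret;
}

On a big endian kernel, in_le32 is the accessor that needs the byte-reversed
(X-form) variant; on a little endian kernel the roles flip, which is what the
#ifdef __BIG_ENDIAN__ block in the diff below selects.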

[ Added 64 bit versions - Anton ]
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 926f160f
@@ -113,7 +113,7 @@ extern bool isa_io_special;
 
 /* gcc 4.0 and older doesn't have 'Z' constraint */
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
 static inline u##size name(const volatile u##size __iomem *addr) \
 { \
 	u##size ret; \
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret; \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
 static inline void name(volatile u##size __iomem *addr, u##size val) \
 { \
 	__asm__ __volatile__("sync;"#insn" %1,0,%2" \
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 	IO_SET_SYNC_FLAG(); \
 }
 #else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
 static inline u##size name(const volatile u##size __iomem *addr) \
 { \
 	u##size ret; \
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret; \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
 static inline void name(volatile u##size __iomem *addr, u##size val) \
 { \
 	__asm__ __volatile__("sync;"#insn" %1,%y0" \
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 }
 #endif
 
-#define DEF_MMIO_IN_BE(name, size, insn) \
+#define DEF_MMIO_IN_D(name, size, insn) \
 static inline u##size name(const volatile u##size __iomem *addr) \
 { \
 	u##size ret; \
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
 	return ret; \
 }
 
-#define DEF_MMIO_OUT_BE(name, size, insn) \
+#define DEF_MMIO_OUT_D(name, size, insn) \
 static inline void name(volatile u##size __iomem *addr, u##size val) \
 { \
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 	IO_SET_SYNC_FLAG(); \
 }
 
-DEF_MMIO_IN_BE(in_8, 8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
-
-DEF_MMIO_OUT_BE(out_8, 8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_IN_D(in_8, 8, lbz);
+DEF_MMIO_OUT_D(out_8, 8, stb);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+#endif /* __BIG_ENDIAN */
 
 #ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
 
 /* There is no asm instructions for 64 bits reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
 	out_be64(addr, swab64(val));
 }
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There is no asm instructions for 64 bits reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+	return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+	out_le64(addr, swab64(val));
+}
+#endif
 
 #endif /* __powerpc64__ */
 
 /*
...
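
As the comment retained in the hunks above notes, there is no byte-reversed
64-bit load/store instruction to pair with ld/std, so the cross-endian 64-bit
accessors are built from the native accessor plus a 64-bit byte swap:
in_le64/out_le64 on a big endian build, in_be64/out_be64 on a little endian
build. A self-contained sketch of that pattern follows; the example_* names
are invented, a plain volatile load stands in for the accessor that
DEF_MMIO_IN_D(..., ld) would generate (barriers omitted), and
__builtin_bswap64 stands in for the kernel's swab64().

#include <stdint.h>

/* Stand-in for the native-endian 64-bit MMIO load the patch generates
 * with DEF_MMIO_IN_D(..., ld); the sync barrier is omitted here. */
static inline uint64_t example_in_native64(const volatile uint64_t *addr)
{
	return *addr;
}

/* Cross-endian 64-bit read, mirroring in_le64 (big endian build) and
 * in_be64 (little endian build) in the patch: native load, then swap. */
static inline uint64_t example_in_swapped64(const volatile uint64_t *addr)
{
	return __builtin_bswap64(example_in_native64(addr));
}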