Commit 6baaeada authored by Paul Burton

MIPS: Provide unroll() macro, use it for cache ops

Currently we have a lot of duplication in asm/r4kcache.h to handle
manually unrolling loops of cache ops for various line sizes, and we
have to explicitly handle the difference in cache op immediate width
between MIPSr6 & earlier ISA revisions with further duplication.

Introduce an unroll() macro in asm/unroll.h which expands to a switch
statement that calls a function or expands a preprocessor macro a
compile-time constant number of times in a row - effectively
explicitly unrolling a loop. We make use of this here to remove the
cache op duplication & will use it further in later patches.

A nice side effect of this is that calculating the cache op offset
immediate is now the compiler's responsibility, so we're no longer
sensitive to the width change of that immediate in MIPSr6 & will be
similarly agnostic to immediate width in any future supported ISA.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
parent a14bf1dc
This diff is collapsed.
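The collapsed portion of the diff presumably includes arch/mips/include/asm/r4kcache.h, where the cache-op helpers are rebuilt on top of unroll(). As a rough illustration only - not the commit's exact code: the _cache_op helper below is hypothetical, and the cache_unroll() in the real patch also takes an instruction-selector argument (kernel_cache, visible in the hunks further down) - a per-line cache op can be driven through unroll() by letting every expansion re-evaluate an address expression with a side effect:

#include <asm/unroll.h>

/* Hypothetical single cache instruction; op must be a compile-time constant. */
#define _cache_op(op, addr) \
	__asm__ __volatile__("cache	%0, %1" \
			     : /* no outputs */ \
			     : "i" (op), "R" (*(unsigned char *)(addr)))

/*
 * Issue 'times' cache ops, one per 'lsize'-byte line. Each expansion of
 * _cache_op() re-evaluates its arguments, so i++ advances the line index
 * and the compiler folds each (addr) + (i * lsize) into the instruction.
 */
#define cache_unroll(times, op, addr, lsize) do { \
	int i = 0; \
	unroll(times, _cache_op, op, (addr) + (i++ * (lsize))); \
} while (0)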
arch/mips/include/asm/unroll.h
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_UNROLL_H__
#define __ASM_UNROLL_H__
/*
* Explicitly unroll a loop, for use in cases where doing so is performance
* critical.
*
* Ideally we'd rely upon the compiler to provide this but there's no commonly
* available means to do so. For example GCC's "#pragma GCC unroll"
* functionality would be ideal but is only available from GCC 8 onwards. Using
* -funroll-loops is an option but GCC tends to make poor choices when
* compiling our string functions. -funroll-all-loops leads to massive code
* bloat, even if only applied to the string functions.
*/
#define unroll(times, fn, ...) do { \
	extern void bad_unroll(void) \
		__compiletime_error("Unsupported unroll"); \
	\
	/* \
	 * We can't unroll if the number of iterations isn't \
	 * compile-time constant. Unfortunately GCC versions \
	 * up until 4.6 tend to miss obvious constants & cause \
	 * this check to fail, even though they go on to \
	 * generate reasonable code for the switch statement, \
	 * so we skip the sanity check for those compilers. \
	 */ \
	BUILD_BUG_ON(GCC_VERSION >= 40700 && \
		     !__builtin_constant_p(times)); \
	\
	switch (times) { \
	case 32: fn(__VA_ARGS__); /* fall through */ \
	case 31: fn(__VA_ARGS__); /* fall through */ \
	case 30: fn(__VA_ARGS__); /* fall through */ \
	case 29: fn(__VA_ARGS__); /* fall through */ \
	case 28: fn(__VA_ARGS__); /* fall through */ \
	case 27: fn(__VA_ARGS__); /* fall through */ \
	case 26: fn(__VA_ARGS__); /* fall through */ \
	case 25: fn(__VA_ARGS__); /* fall through */ \
	case 24: fn(__VA_ARGS__); /* fall through */ \
	case 23: fn(__VA_ARGS__); /* fall through */ \
	case 22: fn(__VA_ARGS__); /* fall through */ \
	case 21: fn(__VA_ARGS__); /* fall through */ \
	case 20: fn(__VA_ARGS__); /* fall through */ \
	case 19: fn(__VA_ARGS__); /* fall through */ \
	case 18: fn(__VA_ARGS__); /* fall through */ \
	case 17: fn(__VA_ARGS__); /* fall through */ \
	case 16: fn(__VA_ARGS__); /* fall through */ \
	case 15: fn(__VA_ARGS__); /* fall through */ \
	case 14: fn(__VA_ARGS__); /* fall through */ \
	case 13: fn(__VA_ARGS__); /* fall through */ \
	case 12: fn(__VA_ARGS__); /* fall through */ \
	case 11: fn(__VA_ARGS__); /* fall through */ \
	case 10: fn(__VA_ARGS__); /* fall through */ \
	case 9: fn(__VA_ARGS__); /* fall through */ \
	case 8: fn(__VA_ARGS__); /* fall through */ \
	case 7: fn(__VA_ARGS__); /* fall through */ \
	case 6: fn(__VA_ARGS__); /* fall through */ \
	case 5: fn(__VA_ARGS__); /* fall through */ \
	case 4: fn(__VA_ARGS__); /* fall through */ \
	case 3: fn(__VA_ARGS__); /* fall through */ \
	case 2: fn(__VA_ARGS__); /* fall through */ \
	case 1: fn(__VA_ARGS__); /* fall through */ \
	case 0: break; \
	\
	default: \
		/* \
		 * Either the iteration count is unreasonable \
		 * or we need to add more cases above. \
		 */ \
		bad_unroll(); \
		break; \
	} \
} while (0)
#endif /* __ASM_UNROLL_H__ */
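For a standalone sense of how unroll() behaves, here is a minimal usage sketch (store_zero() and zero_four_words() are made-up names, not part of the patch): because the count is a compile-time constant, BUILD_BUG_ON() never fires and the switch collapses into exactly four back-to-back expansions with no loop control flow left at run time.

#include <linux/build_bug.h>	/* BUILD_BUG_ON(), used by unroll() */
#include <linux/compiler.h>	/* __compiletime_error(), GCC_VERSION */
#include <asm/unroll.h>

/* Store a zero word and advance the pointer. */
#define store_zero(p)	(*(p)++ = 0)

static inline void zero_four_words(unsigned long *p)
{
	unroll(4, store_zero, p);	/* expands to four store_zero(p); statements */
}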
arch/mips/mm/c-r4k.c
@@ -271,12 +271,14 @@ static inline void tx49_blast_icache32(void)
 	/* I'm in even chunk. blast odd chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 	CACHE32_UNROLL32_ALIGN;
 	/* I'm in odd chunk. blast even chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 }

 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
@@ -302,12 +304,14 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 	/* I'm in even chunk. blast odd chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 	CACHE32_UNROLL32_ALIGN;
 	/* I'm in odd chunk. blast even chunks */
 	for (ws = 0; ws < ws_end; ws += ws_inc)
 		for (addr = start; addr < end; addr += 0x400 * 2)
-			cache32_unroll32(addr|ws, Index_Invalidate_I);
+			cache_unroll(32, kernel_cache, Index_Invalidate_I,
+				     addr | ws, 32);
 }

 static void (* r4k_blast_icache_page)(unsigned long addr);
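To make the immediate-width point from the commit message concrete: under the illustrative cache_unroll() sketch earlier, a single cache_unroll(32, ..., addr | ws, 32) call amounts to 32 statements whose address arithmetic is ordinary C, so the compiler chooses how to encode each per-line offset for the target ISA (the cache-op offset field shrank on MIPSr6) instead of relying on hand-written offset immediates in asm strings:

/* Hand-expanded sketch of cache_unroll(32, Index_Invalidate_I, addr | ws, 32) */
_cache_op(Index_Invalidate_I, (addr | ws) + (0 * 32));
_cache_op(Index_Invalidate_I, (addr | ws) + (1 * 32));
/* ... twenty-nine more lines ... */
_cache_op(Index_Invalidate_I, (addr | ws) + (31 * 32));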