Commit 11e14896 authored by Vineet Gupta

ARC: untangle cache flush loop

- Remove the ifdef'ery and write distinct versions for each MMU version,
  even if there is some code duplication
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 6c310681
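
The shape of the change, reduced to a minimal standalone sketch: each MMU generation gets its own straight-line helper and a single build-time alias picks one, instead of #ifdef blocks inside a shared loop body. MY_MMU_VER and the flush_v2/flush_v3 names below are hypothetical stand-ins for CONFIG_ARC_MMU_VER and __cache_line_loop_v2/_v3, not code from the patch:

/*
 * Sketch of the compile-time dispatch pattern; names are illustrative
 * stand-ins, not the kernel's symbols.
 */
#include <stdio.h>

#define MY_MMU_VER 3	/* pretend build-time configuration */

static inline void flush_v2(unsigned long paddr, unsigned long sz)
{
	/* older MMU: one flat loop, paddr-only line ops */
	printf("v2 loop: 0x%lx + %lu bytes\n", paddr, sz);
}

static inline void flush_v3(unsigned long paddr, unsigned long sz)
{
	/* newer MMU: a different flat loop (tag reg + vaddr line ops) */
	printf("v3 loop: 0x%lx + %lu bytes\n", paddr, sz);
}

#if (MY_MMU_VER < 3)
#define flush_lines	flush_v2
#else
#define flush_lines	flush_v3
#endif

int main(void)
{
	/* callers stay version-agnostic; the alias picks the loop */
	flush_lines(0x80001000UL, 4096UL);
	return 0;
}

Callers keep using the single __cache_line_loop name; only the preprocessor decides which body gets compiled in.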
@@ -140,25 +140,19 @@ void read_decode_cache_bcr(void)
  * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
  */
 
-static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
-				      unsigned long sz, const int op)
+static inline
+void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+			  unsigned long sz, const int op)
 {
-	unsigned int aux_cmd, aux_tag;
+	unsigned int aux_cmd;
 	int num_lines;
-	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
 
 	if (op == OP_INV_IC) {
 		aux_cmd = ARC_REG_IC_IVIL;
-#if (CONFIG_ARC_MMU_VER > 2)
-		aux_tag = ARC_REG_IC_PTAG;
-#endif
-	}
-	else {
+	} else {
 		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
 		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
-#if (CONFIG_ARC_MMU_VER > 2)
-		aux_tag = ARC_REG_DC_PTAG;
-#endif
 	}
 
 	/* Ensure we properly floor/ceil the non-line aligned/sized requests
@@ -167,7 +161,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 	 * -@paddr will be cache-line aligned already (being page aligned)
 	 * -@sz will be integral multiple of line size (being page sized).
 	 */
-	if (!full_page_op) {
+	if (!full_page) {
 		sz += paddr & ~CACHE_LINE_MASK;
 		paddr &= CACHE_LINE_MASK;
 		vaddr &= CACHE_LINE_MASK;
@@ -175,32 +169,68 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 
 	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
 
-#if (CONFIG_ARC_MMU_VER <= 2)
 	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
 	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#else
-	/* if V-P const for loop, PTAG can be written once outside loop */
-	if (full_page_op)
+
+	while (num_lines-- > 0) {
+		write_aux_reg(aux_cmd, paddr);
+		paddr += L1_CACHE_BYTES;
+	}
+}
+
+static inline
+void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+			  unsigned long sz, const int op)
+{
+	unsigned int aux_cmd, aux_tag;
+	int num_lines;
+	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+	if (op == OP_INV_IC) {
+		aux_cmd = ARC_REG_IC_IVIL;
+		aux_tag = ARC_REG_IC_PTAG;
+	} else {
+		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+		aux_tag = ARC_REG_DC_PTAG;
+	}
+
+	/* Ensure we properly floor/ceil the non-line aligned/sized requests
+	 * and have @paddr - aligned to cache line and integral @num_lines.
+	 * This however can be avoided for page sized since:
+	 *  -@paddr will be cache-line aligned already (being page aligned)
+	 *  -@sz will be integral multiple of line size (being page sized).
+	 */
+	if (!full_page) {
+		sz += paddr & ~CACHE_LINE_MASK;
+		paddr &= CACHE_LINE_MASK;
+		vaddr &= CACHE_LINE_MASK;
+	}
+	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+	/*
+	 * MMUv3, cache ops require paddr in PTAG reg
+	 * if V-P const for loop, PTAG can be written once outside loop
+	 */
+	if (full_page)
 		write_aux_reg(aux_tag, paddr);
-#endif
 
 	while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-		/* MMUv3, cache ops require paddr seperately */
-		if (!full_page_op) {
+		if (!full_page) {
 			write_aux_reg(aux_tag, paddr);
 			paddr += L1_CACHE_BYTES;
 		}
 
 		write_aux_reg(aux_cmd, vaddr);
 		vaddr += L1_CACHE_BYTES;
-#else
-		write_aux_reg(aux_cmd, paddr);
-		paddr += L1_CACHE_BYTES;
-#endif
 	}
 }
 
+#if (CONFIG_ARC_MMU_VER < 3)
+#define __cache_line_loop	__cache_line_loop_v2
+#elif (CONFIG_ARC_MMU_VER == 3)
+#define __cache_line_loop	__cache_line_loop_v3
+#endif
+
 #ifdef CONFIG_ARC_HAS_DCACHE
 
 /***************************************************************
...
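
Both loop variants keep the same floor/ceil front-end: round paddr down to a line boundary, grow sz by the bytes rounded over, then ceil to whole lines. A standalone worked example of that arithmetic, assuming a 64-byte L1 line and re-deriving CACHE_LINE_MASK and DIV_ROUND_UP locally (not part of the patch):

#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* a request that is neither line aligned nor line sized */
	unsigned long paddr = 0x80001234UL;
	unsigned long sz = 100;
	unsigned long num_lines;

	/* floor the start address, grow sz by the bytes floored over */
	sz += paddr & ~CACHE_LINE_MASK;		/* 100 + 0x34 = 152 */
	paddr &= CACHE_LINE_MASK;		/* 0x80001200 */

	/* ceil to whole lines: 152 bytes -> 3 line operations */
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	printf("paddr=0x%lx lines=%lu\n", paddr, num_lines);
	return 0;
}

For a full-page op both adjustments are skipped, since a page-aligned, page-sized request is already line aligned and an integral number of lines.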