Commit 94ecd224
Authored Aug 16, 2009 by Paul Mundt
sh: Fix up the SH-5 build with caches enabled.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Parent: 1ee4ab09
Showing 6 changed files with 64 additions and 303 deletions (+64, -303):

	arch/sh/include/asm/system.h     +1  -13
	arch/sh/include/asm/system_32.h  +10 -0
	arch/sh/include/asm/system_64.h  +5  -0
	arch/sh/kernel/sh_ksyms_64.c     +0  -8
	arch/sh/mm/cache-sh5.c           +21 -228
	arch/sh/mm/flush-sh4.c           +27 -54
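Taken together, the diffs below move SH-5 onto the parameterised cache-line primitives (__icbi/__ocbp/__ocbi/__ocbwb) and turn its flush routines into static sh5_* functions that are registered through function pointers in a new sh5_cache_init(). A minimal sketch of the dispatch side this targets is shown here; the pointer declarations are inferred from the assignments in the cache-sh5.c hunk and are illustrative, not copied from the tree:

	/* Illustrative sketch only: hook names taken from the sh5_cache_init()
	 * assignments in this commit; declarations inferred rather than quoted. */
	struct page;

	void (*flush_dcache_page)(struct page *page);
	void (*flush_icache_range)(unsigned long start, unsigned long end);

	/* Callers keep using the ordinary names; whichever CPU family
	 * registered its implementation services the call. */
	void example_flush_new_kernel_text(unsigned long start, unsigned long end)
	{
		flush_icache_range(start, end);
	}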
arch/sh/include/asm/system.h

@@ -14,18 +14,6 @@
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()					\
-{							\
-	unsigned long __addr;				\
-	__addr = 0xa8000000;				\
-	__asm__ __volatile__(				\
-		"icbi %0\n\t"				\
-		: /* no output */			\
-		: "m" (__m(__addr)));			\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
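For reference, with the arch-wide macro gone, ctrl_barrier() on these parts now expands through the per-family __icbi(addr) added below; on SH-4A the result is roughly the following (illustrative expansion only, not code from the patch):

	/* What ctrl_barrier() boils down to on SH-4A after this change,
	 * once __icbi(0xa8000000) from system_32.h is expanded. */
	static inline void example_ctrl_barrier(void)
	{
		unsigned long addr = 0xa8000000;

		__asm__ __volatile__("icbi @%0\n\t" : : "r" (addr));
	}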
arch/sh/include/asm/system_32.h

@@ -63,6 +63,16 @@ do { \
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
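Because the new primitives take the target address as an argument, callers can walk a region one cache line at a time without open-coding the asm; the flush-sh4.c hunks below do exactly that with __ocbwb/__ocbp/__ocbi. A hedged sketch of the pattern (the helper name is made up for illustration):

	#include <asm/cache.h>		/* L1_CACHE_BYTES */
	#include <asm/system.h>		/* __ocbwb() after this patch */

	/* Illustrative helper, not part of this patch: write back every
	 * D-cache line covering [start, start + size). */
	static void example_wback_region(void *start, int size)
	{
		unsigned long v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
		unsigned long end = (unsigned long)start + size;

		while (v < end) {
			__ocbwb(v);
			v += L1_CACHE_BYTES;
		}
	}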
arch/sh/include/asm/system_64.h

@@ -37,6 +37,11 @@ do { \
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
 static inline reg_size_t register_align(void *val)
 {
 	return (unsigned long long)(signed long long)(signed long)val;
arch/sh/kernel/sh_ksyms_64.c

@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
arch/sh/mm/cache-sh5.c

@@ -25,29 +25,6 @@ extern void __weak sh4__flush_region_init(void);
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init cpu_cache_init(void)
-{
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
-
-void __init kmap_coherent_init(void)
-{
-	/* XXX ... */
-}
-
-void *kmap_coherent(struct page *page, unsigned long addr)
-{
-	/* XXX ... */
-	return NULL;
-}
-
-void kunmap_coherent(void)
-{
-}
-
 #ifdef CONFIG_DCACHE_DISABLED
 #define sh64_dcache_purge_all()					do { } while (0)
 #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
@@ -233,52 +210,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 	}
 }
 
-/*
- * Invalidate a small range of user context I-cache, not necessarily page
- * (or even cache-line) aligned.
- *
- * Since this is used inside ptrace, the ASID in the mm context typically
- * won't match current_asid.  We'll have to switch ASID to do this.  For
- * safety, and given that the range will be small, do all this under cli.
- *
- * Note, there is a hazard that the ASID in mm->context is no longer
- * actually associated with mm, i.e. if the mm->context has started a new
- * cycle since mm was last active.  However, this is just a performance
- * issue: all that happens is that we invalidate lines belonging to
- * another mm, so the owning process has to refill them when that mm goes
- * live again.  mm itself can't have any cache entries because there will
- * have been a flush_cache_all when the new mm->context cycle started.
- */
-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-					     unsigned long start, int len)
-{
-	unsigned long long eaddr = start;
-	unsigned long long eaddr_end = start + len;
-	unsigned long current_asid, mm_asid;
-	unsigned long flags;
-	unsigned long long epage_start;
-
-	/*
-	 * Align to start of cache line.  Otherwise, suppose len==8 and
-	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
-	 */
-	eaddr = L1_CACHE_ALIGN(start);
-	eaddr_end = start + len;
-
-	mm_asid = cpu_asid(smp_processor_id(), mm);
-	local_irq_save(flags);
-	current_asid = switch_and_save_asid(mm_asid);
-
-	epage_start = eaddr & PAGE_MASK;
-
-	while (eaddr < eaddr_end) {
-		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
-		eaddr += L1_CACHE_BYTES;
-	}
-	switch_and_save_asid(current_asid);
-	local_irq_restore(flags);
-}
-
 static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 {
 	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
@@ -564,7 +495,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-void flush_cache_all(void)
+static void sh5_flush_cache_all(void)
 {
 	sh64_dcache_purge_all();
 	sh64_icache_inv_all();
@@ -591,7 +522,7 @@ void flush_cache_all(void)
  * I-cache.  This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(struct mm_struct *mm)
 {
 	sh64_dcache_purge_all();
 }
@@ -603,8 +534,8 @@ void flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh5_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+				  unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -621,8 +552,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * Note, this is called with pte lock held.
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
-		      unsigned long pfn)
+static void sh5_flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
+				 unsigned long pfn)
 {
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
@@ -630,7 +561,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
 		sh64_icache_inv_user_page(vma, eaddr);
 }
 
-void flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(struct page *page)
 {
 	sh64_dcache_purge_phy_page(page_to_phys(page));
 	wmb();
@@ -644,39 +575,20 @@ void flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_purge_region((void *)start, end);
 	wmb();
 	sh64_icache_inv_kernel_range(start, end);
 }
 
 /*
- * Flush the range of user (defined by vma->vm_mm) address space starting
- * at 'addr' for 'len' bytes from the cache.  The range does not straddle
- * a page boundary, the unique physical page containing the range is
- * 'page'.  This seems to be used mainly for invalidating an address
- * range following a poke into the program text through the ptrace() call
- * from another process (e.g. for BRK instruction insertion).
- */
-static void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len)
-{
-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-	mb();
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-}
-
-/*
  * For the address range [start,end), write back the data from the
  * D-cache and invalidate the corresponding region of the I-cache for the
  * current process.  Used to flush signal trampolines on the stack to
  * make them executable.
  */
-void flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(unsigned long vaddr)
 {
 	unsigned long end = vaddr + L1_CACHE_BYTES;
@@ -685,138 +597,19 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
-#ifdef CONFIG_MMU
-/*
- * These *MUST* lie in an area of virtual address space that's otherwise
- * unused.
- */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END 0xe8000000UL
-
-/*
- * Given a physical address paddr, and a user virtual address user_eaddr
- * which will eventually be mapped to it, create a one-off kernel-private
- * eaddr mapped to the same paddr.  This is used for creating special
- * destination pages for copy_user_page and clear_user_page.
- */
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
-					    unsigned long paddr)
-{
-	static unsigned long current_pointer = UNIQUE_EADDR_START;
-	unsigned long coloured_pointer;
-
-	if (current_pointer == UNIQUE_EADDR_END) {
-		sh64_dcache_purge_all();
-		current_pointer = UNIQUE_EADDR_START;
-	}
-
-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
-				(user_eaddr & CACHE_OC_SYN_MASK);
-
-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
-	return coloured_pointer;
-}
-
-static void sh64_copy_user_page_coloured(void *to, void *from,
-					 unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing cache entries of the wrong colour.  These are
-	 * present quite often, if the kernel has recently used the page
-	 * internally, then given it up, then it's been allocated to the user.
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	copy_page(from, coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing kernel-originated lines of the wrong
-	 * colour (as above)
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	clear_page(coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * 'from' and 'to' are kernel virtual addresses (within the superpage
- * mapping of the physical RAM).  'address' is the user virtual address
- * where the copy 'to' will be mapped after.  This allows a custom
- * mapping to be used to ensure that the new copy is placed in the
- * right cache sets for the user to see it without having to bounce it
- * out via memory.  Note however : the call to flush_page_to_ram in
- * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- * very important case!
- *
- * TBD : can we guarantee that on every call, any cache entries for
- * 'from' are in the same colour sets as 'address' also?  i.e. is this
- * always used just to deal with COW?  (I suspect not).
- *
- * There are two possibilities here for when the page 'from' was last accessed:
- * - by the kernel : this is OK, no purge required.
- * - by the/a user (e.g. for break_COW) : need to purge.
- *
- * If the potential user mapping at 'address' is the same colour as
- * 'from' there is no need to purge any cache lines from the 'from'
- * page mapped into cache sets of colour 'address'.  (The copy will be
- * accessing the page through 'from').
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
-{
-	if (((address ^ (unsigned long)from) & CACHE_OC_SYN_MASK) != 0)
-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
-
-	if (((address ^ (unsigned long)to) & CACHE_OC_SYN_MASK) == 0)
-		copy_page(to, from);
-	else
-		sh64_copy_user_page_coloured(to, from, address);
-}
-
-/*
- * 'to' is a kernel virtual address (within the superpage mapping of the
- * physical RAM).  'address' is the user virtual address where the 'to'
- * page will be mapped after.  This allows a custom mapping to be used to
- * ensure that the new copy is placed in the right cache sets for the
- * user to see it without having to bounce it out via memory.
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	if (((address ^ (unsigned long)to) & CACHE_OC_SYN_MASK) == 0)
-		clear_page(to);
-	else
-		sh64_clear_user_page_coloured(to, address);
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-}
-#endif
+void __init sh5_cache_init(void)
+{
+	flush_cache_all		= sh5_flush_cache_all;
+	flush_cache_mm		= sh5_flush_cache_mm;
+	flush_cache_dup_mm	= sh5_flush_cache_mm;
+	flush_cache_page	= sh5_flush_cache_page;
+	flush_cache_range	= sh5_flush_cache_range;
+	flush_dcache_page	= sh5_flush_dcache_page;
+	flush_icache_range	= sh5_flush_icache_range;
+	flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
+
+	/* Reserve a slot for dcache colouring in the DTLB */
+	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+	sh4__flush_region_init();
+}
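With sh5_cache_init() in place, SH-5 no longer provides the flush routines as global symbols, which is also why the exports were dropped from sh_ksyms_64.c above. A rough sketch of the resulting call order follows; the claim that the common SH cache-initialisation path invokes sh5_cache_init() is an assumption here, and the surrounding names are illustrative:

	/* Illustrative sequence, not literal kernel code. */
	void __init example_cache_bringup(void)
	{
		/* Assumed: reached from the generic SH cache init code when
		 * running on an SH-5 part. */
		sh5_cache_init();	/* wires up flush_cache_all, flush_dcache_page, ... */
	}

	void example_runtime_use(struct page *page)
	{
		/* Generic code keeps calling the usual hook name... */
		flush_dcache_page(page);	/* ...which now lands in sh5_flush_dcache_page(). */
	}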
arch/sh/mm/flush-sh4.c
@@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }
@@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }
@@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }
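All three loops above share the same shape: an 8-way unrolled body plus a tail loop, differing only in which cache operation is applied per line. The patch keeps them as three separate unrolled loops; purely as an illustration of the shared structure, the pattern could be expressed with the new macros like this (EXAMPLE_FLUSH_LOOP is a made-up name, not part of the patch):

	/* Illustrative only: shared structure of the three loops above. */
	#define EXAMPLE_FLUSH_LOOP(op, start, size)				\
	do {									\
		unsigned long __v = (unsigned long)(start) & ~(L1_CACHE_BYTES - 1); \
		unsigned long __end = (unsigned long)(start) + (size);		\
		unsigned long __cnt = (__end - __v + L1_CACHE_BYTES - 1)	\
					/ L1_CACHE_BYTES;			\
		while (__cnt >= 8) {						\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			op(__v); __v += L1_CACHE_BYTES;				\
			__cnt -= 8;						\
		}								\
		while (__cnt--) {						\
			op(__v); __v += L1_CACHE_BYTES;				\
		}								\
	} while (0)

	/* e.g. the purge variant would then read: */
	static void example_purge_region(void *start, int size)
	{
		EXAMPLE_FLUSH_LOOP(__ocbp, start, size);
	}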