Commit 1c4a7135 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] PPC32: Fix copy prefetch on non coherent PPCs

From: Matt Porter <mporter@kernel.crashing.org>

This patch fixes the condition where prefetching cache lines beyond a
buffer can cause data corruption on non-cache-coherent PPCs. It is a port
of the version that went into 2.4, from Eugene Surovegin <ebs@ebshome.net>.
parent 4bab667f
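
For orientation before the assembly: the copy loops below prefetch source
cache lines with dcbt while they copy. On a non-cache-coherent PPC, a
prefetch that reaches past the end of the buffer can pull a neighbouring
cache line into the data cache (typically one a DMA engine still owns), and
later reads of that line then see stale data. The fix is to stop prefetching
before the end of the region being copied. A minimal C sketch of the idea,
not part of the patch and using made-up names (copy_lines, LINE, ahead):

/* Illustrative C sketch only -- the patch itself is the PPC assembly below.
 * Copy n_lines cache lines from src to dst, prefetching "ahead" lines in
 * front of the copy but never past the end of src.  "ahead" plays the role
 * of MAX_COPY_PREFETCH; LINE stands in for L1_CACHE_LINE_SIZE.
 */
#include <string.h>

#define LINE 32                            /* assumed cache line size */

static void copy_lines(char *dst, const char *src,
                       unsigned long n_lines, unsigned long ahead)
{
        unsigned long i;

        for (i = 0; i < n_lines; i++) {
                if (i + ahead < n_lines)   /* the fix: bound the prefetch */
                        __builtin_prefetch(src + (i + ahead) * LINE);
                memcpy(dst + i * LINE, src + i * LINE, LINE);
        }
}

The assembly below gets the same effect without a per-iteration test: it
copies most of the buffer with the prefetch running ahead, then finishes the
last MAX_COPY_PREFETCH cache lines in a pass that does not prefetch ahead.
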
@@ -780,9 +780,18 @@ _GLOBAL(clear_pages)
_GLOBAL(copy_page)
addi r3,r3,-4
addi r4,r4,-4
#ifdef CONFIG_8xx
/* don't use prefetch on 8xx */
li r0,4096/L1_CACHE_LINE_SIZE
mtctr r0
1: COPY_16_BYTES
bdnz 1b
blr
#else /* not 8xx, we can prefetch */
li r5,4
#ifndef CONFIG_8xx
#if MAX_COPY_PREFETCH > 1
li r0,MAX_COPY_PREFETCH
li r11,4
@@ -790,19 +799,17 @@ _GLOBAL(copy_page)
11: dcbt r11,r4
addi r11,r11,L1_CACHE_LINE_SIZE
bdnz 11b
#else /* MAX_L1_COPY_PREFETCH == 1 */
#else /* MAX_COPY_PREFETCH == 1 */
dcbt r5,r4
li r11,L1_CACHE_LINE_SIZE+4
#endif /* MAX_L1_COPY_PREFETCH */
#endif /* CONFIG_8xx */
li r0,4096/L1_CACHE_LINE_SIZE
#endif /* MAX_COPY_PREFETCH */
li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
crclr 4*cr0+eq
2:
mtctr r0
1:
#ifndef CONFIG_8xx
dcbt r11,r4
dcbz r5,r3
#endif
COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 32
COPY_16_BYTES
@@ -818,7 +825,12 @@ _GLOBAL(copy_page)
#endif
#endif
bdnz 1b
blr
beqlr
crnot 4*cr0+eq,4*cr0+eq
li r0,MAX_COPY_PREFETCH
li r11,4
b 2b
#endif /* CONFIG_8xx */
/*
* void atomic_clear_mask(atomic_t mask, atomic_t *addr)
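
Roughly, the rewritten copy_page works as follows: the first pass copies
4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH lines with dcbt running
MAX_COPY_PREFETCH lines ahead of the copy; cr0.eq (cleared with crclr,
tested with beqlr, flipped with crnot) records which pass is active; the
second pass copies the remaining MAX_COPY_PREFETCH lines with r11 reset to
4, so dcbt only touches the line already being copied; the new CONFIG_8xx
branch skips prefetching entirely. A loose C model of that control flow,
with assumed values for the line size and prefetch depth and an invented
function name:

#include <string.h>

#define LINE            32                 /* assumed L1_CACHE_LINE_SIZE */
#define LINES_PER_PAGE  (4096 / LINE)
#define PREFETCH        4                  /* assumed MAX_COPY_PREFETCH */

static void copy_page_model(char *dst, const char *src)
{
        unsigned long count = LINES_PER_PAGE - PREFETCH;  /* li r0,... - MAX_COPY_PREFETCH */
        unsigned long ahead = PREFETCH;                   /* prefetch distance in lines */
        int last_pass = 0;                                /* cr0.eq, cleared by crclr */
        unsigned long i;

        for (;;) {                                        /* label 2: */
                for (i = 0; i < count; i++) {             /* mtctr r0 ... bdnz 1b */
                        __builtin_prefetch(src + ahead * LINE);
                        memcpy(dst, src, LINE);
                        dst += LINE;
                        src += LINE;
                }
                if (last_pass)                            /* beqlr */
                        return;
                last_pass = 1;                            /* crnot 4*cr0+eq,4*cr0+eq */
                count = PREFETCH;                         /* li r0,MAX_COPY_PREFETCH */
                ahead = 0;                                /* li r11,4 */
        }
}
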
@@ -436,48 +436,57 @@ _GLOBAL(__copy_tofrom_user)
73: stwu r9,4(r6)
bdnz 72b
.section __ex_table,"a"
.align 2
.long 70b,100f
.long 71b,101f
.long 72b,102f
.long 73b,103f
.text
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
li r11,4
beq 63f
#if !defined(CONFIG_8xx)
#ifdef CONFIG_8xx
/* Don't use prefetch on 8xx */
mtctr r0
53: COPY_16_BYTES_WITHEX(0)
bdnz 53b
#else /* not CONFIG_8xx */
/* Here we decide how far ahead to prefetch the source */
li r3,4
cmpwi r0,1
li r7,0
ble 114f
li r7,1
#if MAX_COPY_PREFETCH > 1
/* Heuristically, for large transfers we prefetch
MAX_COPY_PREFETCH cachelines ahead. For small transfers
we prefetch 1 cacheline ahead. */
cmpwi r0,MAX_COPY_PREFETCH
li r7,1
li r3,4
ble 111f
ble 112f
li r7,MAX_COPY_PREFETCH
111: mtctr r7
112: dcbt r3,r4
112: mtctr r7
111: dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
bdnz 112b
#else /* MAX_COPY_PREFETCH == 1 */
li r3,CACHELINE_BYTES + 4
dcbt r11,r4
#endif /* MAX_COPY_PREFETCH */
#endif /* CONFIG_8xx */
mtctr r0
53:
#if !defined(CONFIG_8xx)
bdnz 111b
#else
dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
114: subf r8,r7,r0
mr r0,r7
mtctr r8
53: dcbt r3,r4
54: dcbz r11,r6
#endif
/* had to move these to keep extable in order */
.section __ex_table,"a"
.align 2
.long 70b,100f
.long 71b,101f
.long 72b,102f
.long 73b,103f
#if !defined(CONFIG_8xx)
.long 54b,105f
#endif
.text
/* the main body of the cacheline loop */
COPY_16_BYTES_WITHEX(0)
@@ -495,6 +504,11 @@ _GLOBAL(__copy_tofrom_user)
#endif
#endif
bdnz 53b
cmpwi r0,0
li r3,4
li r7,0
bne 114b
#endif /* CONFIG_8xx */
63: srwi. r0,r5,2
mtctr r0
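
__copy_tofrom_user gets the same treatment. The rewritten code first picks a
prefetch distance in r7 (0 when there is at most one complete cache line to
do, MAX_COPY_PREFETCH for large transfers, 1 otherwise), then at label 114
copies all but the last r7 lines with dcbt running r7 lines ahead, and
finally branches back to 114 once more with r7 = 0 to finish those last
lines without prefetching past the source. A small sketch of just the
distance heuristic (function name invented, MAX_COPY_PREFETCH value assumed
for the sketch):

#define MAX_COPY_PREFETCH 4     /* assumed value for this sketch */

static unsigned long prefetch_distance(unsigned long n_lines)
{
        if (n_lines <= 1)
                return 0;                   /* cmpwi r0,1; ble 114f with r7 = 0 */
        if (n_lines > MAX_COPY_PREFETCH)
                return MAX_COPY_PREFETCH;   /* large transfer: full distance */
        return 1;                           /* small transfer: one line ahead */
}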