Commit 70ddb63a authored by Joonwoo Park, committed by Will Deacon

arm64: optimize memcpy_{from,to}io() and memset_io()

Optimize memcpy_{from,to}io() and memset_io() by transferring in 64-bit
chunks as much as possible, with minimized barrier usage.  This simple
optimization brings faster throughput compared to the current byte-by-byte
read and write with a barrier in the loop.  The code's skeleton is taken
from the powerpc implementation.

Link: http://lkml.kernel.org/p/20141020133304.GH23751@e104818-lin.cambridge.arm.com
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Trilok Soni <tsoni@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 4ee20980
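The patch follows a standard head/bulk/tail pattern: copy single bytes until both pointers hit an 8-byte boundary, move the bulk of the data as 64-bit words, then finish the remainder byte by byte. Below is a minimal user-space sketch of the same pattern, for illustration only: it uses plain memory instead of MMIO, so ordinary loads/stores stand in for the kernel's __raw_* accessors, and copy_aligned64 plus the local IS_ALIGNED are made-up names, not part of the patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-in for the kernel's IS_ALIGNED() macro. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/*
 * Head/bulk/tail copy: bytes until both pointers are 8-byte aligned,
 * then 64-bit chunks, then the trailing bytes.
 */
static void copy_aligned64(void *to, const void *from, size_t count)
{
	unsigned char *t = to;
	const unsigned char *f = from;

	while (count && (!IS_ALIGNED((uintptr_t)f, 8) ||
			 !IS_ALIGNED((uintptr_t)t, 8))) {
		*t++ = *f++;
		count--;
	}

	while (count >= 8) {
		/*
		 * A fixed 8-byte memcpy typically compiles to a single
		 * 64-bit load/store without violating strict aliasing.
		 */
		memcpy(t, f, 8);
		t += 8;
		f += 8;
		count -= 8;
	}

	while (count) {
		*t++ = *f++;
		count--;
	}
}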
@@ -25,12 +25,26 @@
  */
 void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
 {
-	unsigned char *t = to;
-	while (count) {
+	while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
+			 !IS_ALIGNED((unsigned long)to, 8))) {
+		*(u8 *)to = __raw_readb(from);
+		from++;
+		to++;
 		count--;
-		*t = readb(from);
-		t++;
+	}
+
+	while (count >= 8) {
+		*(u64 *)to = __raw_readq(from);
+		from += 8;
+		to += 8;
+		count -= 8;
+	}
+
+	while (count) {
+		*(u8 *)to = __raw_readb(from);
 		from++;
+		to++;
+		count--;
 	}
 }
 EXPORT_SYMBOL(__memcpy_fromio);
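One property of the alignment head loop above worth keeping in mind: the 64-bit loop is entered only once both from and to are 8-byte aligned, which can only happen when the two pointers are misaligned by the same offset; otherwise the head loop byte-copies the entire buffer. A small stand-alone check of that behaviour follows, using hypothetical addresses and a locally spelled-out IS_ALIGNED assumed equivalent to the kernel macro.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uintptr_t from = 0x1003;	/* misaligned by 3 */
	uintptr_t to   = 0x2003;	/* misaligned by the same 3 */
	size_t count = 32;

	/*
	 * Walk the head loop: after 5 byte steps both pointers reach an
	 * 8-byte boundary and the 64-bit loop can take over.
	 */
	while (count && (!IS_ALIGNED(from, 8) || !IS_ALIGNED(to, 8))) {
		from++;
		to++;
		count--;
	}

	assert(IS_ALIGNED(from, 8) && IS_ALIGNED(to, 8));
	assert(count == 27);	/* 32 - 5 bytes handled in the head loop */

	return 0;
}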
@@ -40,12 +54,26 @@ EXPORT_SYMBOL(__memcpy_fromio);
  */
 void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
 {
-	const unsigned char *f = from;
-	while (count) {
+	while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
+			 !IS_ALIGNED((unsigned long)from, 8))) {
+		__raw_writeb(*(volatile u8 *)from, to);
+		from++;
+		to++;
 		count--;
-		writeb(*f, to);
-		f++;
+	}
+
+	while (count >= 8) {
+		__raw_writeq(*(volatile u64 *)from, to);
+		from += 8;
+		to += 8;
+		count -= 8;
+	}
+
+	while (count) {
+		__raw_writeb(*(volatile u8 *)from, to);
+		from++;
 		to++;
+		count--;
 	}
 }
 EXPORT_SYMBOL(__memcpy_toio);
@@ -55,10 +83,28 @@ EXPORT_SYMBOL(__memcpy_toio);
  */
 void __memset_io(volatile void __iomem *dst, int c, size_t count)
 {
-	while (count) {
+	u64 qc = (u8)c;
+
+	qc |= qc << 8;
+	qc |= qc << 16;
+	qc |= qc << 32;
+
+	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+		__raw_writeb(c, dst);
+		dst++;
 		count--;
-		writeb(c, dst);
+	}
+
+	while (count >= 8) {
+		__raw_writeq(qc, dst);
+		dst += 8;
+		count -= 8;
+	}
+
+	while (count) {
+		__raw_writeb(c, dst);
 		dst++;
+		count--;
 	}
 }
 EXPORT_SYMBOL(__memset_io);
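In __memset_io() the fill byte is splatted across all eight byte lanes of qc before the 64-bit loop. A quick stand-alone check of that splat arithmetic (plain C, no MMIO; 0xAB is an arbitrary example value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int c = 0xAB;			/* example fill value */
	uint64_t qc = (uint8_t)c;

	qc |= qc << 8;			/* 0x000000000000ABAB */
	qc |= qc << 16;			/* 0x00000000ABABABAB */
	qc |= qc << 32;			/* 0xABABABABABABABAB */

	assert(qc == 0xABABABABABABABABULL);
	return 0;
}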