Commit 2657fd8f authored by David S. Miller

[SPARC64]: Revamped memcpy infrastructure.

- Make it easier to maintain the Ultra-I vs. Ultra-III
  memcpy implementations.  Previously, three entirely
  separate copies of the routines had to be maintained
  (the new shared-body scheme is sketched just below the
  commit metadata).
- Kill the %asi-register-writing single Ultra-I memcpy
  loop that served both user and kernel copies.  It was
  not worth it.
- Simplify exception detection and handling enormously.
Signed-off-by: David S. Miller <davem@redhat.com>
parent d360f1ee
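For orientation: with this commit each per-CPU user-copy wrapper no longer carries its own full copy of the routine.  Instead it defines a handful of preprocessor macros and then includes the shared memcpy body, and cheetah_patch_copyops (U3patch.S, shown further down) patches the generic entry points to branch to the Ultra-III variants.  A minimal sketch of the wrapper pattern, distilled from U1copy_from_user.S below (simplified: the real EX_LD also emits a .fixup/__ex_table entry; see the file itself):

	#define FUNC_NAME	___copy_from_user
	#define LOAD(type,addr,dest)	type##a [addr] %asi, dest	/* loads use the user ASI */
	#define EX_LD(x)	x	/* real version wraps x with a .fixup/__ex_table fixup entry */
	#define EX_RETVAL(x)	0	/* success return value: 0 for user copies */
	#include "U1memcpy.S"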
@@ -334,7 +334,6 @@ EXPORT_SYMBOL(sys_close);
 #endif
 
 /* Special internal versions of library functions. */
-EXPORT_SYMBOL(__memset);
 EXPORT_SYMBOL(_clear_page);
 EXPORT_SYMBOL(clear_user_page);
 EXPORT_SYMBOL(copy_user_page);
@@ -343,7 +342,7 @@ EXPORT_SYMBOL(__memscan_zero);
 EXPORT_SYMBOL(__memscan_generic);
 EXPORT_SYMBOL(__memcmp);
 EXPORT_SYMBOL(__strncmp);
-EXPORT_SYMBOL(__memmove);
+EXPORT_SYMBOL(__memset);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(csum_partial);
@@ -351,9 +350,12 @@ EXPORT_SYMBOL(csum_partial_copy_sparc64);
 EXPORT_SYMBOL(ip_fast_csum);
 
 /* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_in_user);
+EXPORT_SYMBOL(copy_to_user_fixup);
+EXPORT_SYMBOL(copy_from_user_fixup);
+EXPORT_SYMBOL(copy_in_user_fixup);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__bzero_noasi);
@@ -7,11 +7,12 @@ EXTRA_CFLAGS := -Werror
 lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
	 memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
-	 VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
+	 VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
	 VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
-	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o \
-	 U3copy_in_user.o mcount.o ipcsum.o rwsem.o xor.o splock.o \
-	 find_bit.o
+	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
+	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
+	 copy_in_user.o user_fixup.o memmove.o \
+	 mcount.o ipcsum.o rwsem.o xor.o splock.o find_bit.o
 
 lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace.
*
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
#define EX_LD(x) \
98: x; \
.section .fixup; \
.align 4; \
99: retl; \
mov 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define FUNC_NAME ___copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
#define EX_RETVAL(x) 0
#include "U1memcpy.S"
/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace.
*
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
#define EX_ST(x) \
98: x; \
.section .fixup; \
.align 4; \
99: retl; \
mov 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define FUNC_NAME ___copy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
#define EX_RETVAL(x) 0
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, memcpy_user_stub; \
nop; \
#include "U1memcpy.S"
/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
*
* Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef STORE_BLK
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
#endif
#ifndef FUNC_NAME
#define FUNC_NAME memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
faligndata %f1, %f2, %f48; \
faligndata %f2, %f3, %f50; \
faligndata %f3, %f4, %f52; \
faligndata %f4, %f5, %f54; \
faligndata %f5, %f6, %f56; \
faligndata %f6, %f7, %f58; \
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
EX_LD(LOAD_BLK(%src, %fdest)); \
EX_ST(STORE_BLK(%fsrc, %dest)); \
add %src, 0x40, %src; \
subcc %len, 0x40, %len; \
be,pn %xcc, jmptgt; \
add %dest, 0x40, %dest; \
#define LOOP_CHUNK1(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
#define STORE_SYNC(dest, fsrc) \
EX_ST(STORE_BLK(%fsrc, %dest)); \
add %dest, 0x40, %dest;
#define STORE_JUMP(dest, fsrc, target) \
EX_ST(STORE_BLK(%fsrc, %dest)); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target;
#define FINISH_VISCHUNK(dest, f0, f1, left) \
subcc %left, 8, %left;\
bl,pn %xcc, 95f; \
faligndata %f0, %f1, %f48; \
EX_ST(STORE(std, %f48, %dest)); \
add %dest, 8, %dest;
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
subcc %left, 8, %left; \
bl,pn %xcc, 95f; \
fsrc1 %f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
.align 64
.globl FUNC_NAME
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE
mov %o0, %g5
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
blu,a,pn %XCC, 80f
or %o3, %o2, %o3
cmp %o2, (5 * 64)
blu,pt %XCC, 70f
andcc %o3, 0x7, %g0
/* Clobbers o5/g1/g2/g3/g7/icc/xcc. */
VISEntry
/* Is 'dst' already aligned on an 64-byte boundary? */
andcc %o0, 0x3f, %g2
be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
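	/* Worked example: if dst ends in 0x28, then %g2 = 0x28 - 0x40 = -0x18,
	 * which negates to 0x18, i.e. 24 bytes to copy before dst reaches the
	 * next 64-byte boundary.  The code below splits that count into a
	 * byte-at-a-time part (%g1 = low 3 bits) and an 8-byte part (%g2 & 0x38).
	 */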
sub %o0, %o1, %o4
sub %g2, 0x40, %g2
sub %g0, %g2, %g2
sub %o2, %g2, %o2
andcc %g2, 0x7, %g1
be,pt %icc, 2f
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST(STORE(stb, %o3, %o1 + %o4))
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
add %o1, %o4, %o0
2: cmp %g2, 0x0
and %o1, 0x7, %g1
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
EX_LD(LOAD(ldd, %o1, %f4))
1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST(STORE(std, %f0, %o0))
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
EX_ST(STORE(std, %f0, %o0))
bne,pt %icc, 1b
add %o0, 0x8, %o0
/* Destination is 64-byte aligned. */
3:
membar #LoadStore | #StoreStore | #StoreLoad
subcc %o2, 0x40, %o4
add %o1, %g1, %g1
andncc %o4, (0x40 - 1), %o4
srl %g1, 3, %g2
sub %o2, %o4, %g3
andn %o1, (0x40 - 1), %o1
and %g2, 7, %g2
andncc %g3, 0x7, %g3
fmovd %f0, %f2
sub %o2, %o4, %o2
add %g1, %o4, %g1
subcc %o2, %g3, %o2
EX_LD(LOAD_BLK(%o1, %f0))
add %o1, 0x40, %o1
add %g1, %g3, %g1
EX_LD(LOAD_BLK(%o1, %f16))
add %o1, 0x40, %o1
sub %o4, 0x80, %o4
EX_LD(LOAD_BLK(%o1, %f32))
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
* one for each possible alignment of the
* source buffer. Each loop instance is 452
* bytes.
*/
sll %g2, 3, %o3
sub %o3, %g2, %o3
sllx %o3, 4, %o3
add %o3, %g2, %o3
sllx %o3, 2, %g2
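	/* The five instructions above compute %g2 * 452 as
	 * ((%g2 * 8 - %g2) * 16 + %g2) * 4, the byte offset of the loop
	 * instance matching the source alignment, which is added to the
	 * PC-relative base obtained by the "rd %pc" below.
	 */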
1: rd %pc, %o3
add %o3, %lo(1f - 1b), %o3
jmpl %o3 + %g2, %g0
nop
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
STORE_JUMP(o0, f48, 40f) membar #Sync
2: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
STORE_JUMP(o0, f48, 48f) membar #Sync
3: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
STORE_JUMP(o0, f48, 56f) membar #Sync
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
STORE_JUMP(o0, f48, 41f) membar #Sync
2: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
STORE_JUMP(o0, f48, 49f) membar #Sync
3: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
STORE_JUMP(o0, f48, 57f) membar #Sync
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
STORE_JUMP(o0, f48, 42f) membar #Sync
2: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
STORE_JUMP(o0, f48, 50f) membar #Sync
3: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
STORE_JUMP(o0, f48, 58f) membar #Sync
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
STORE_JUMP(o0, f48, 43f) membar #Sync
2: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
STORE_JUMP(o0, f48, 51f) membar #Sync
3: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
STORE_JUMP(o0, f48, 59f) membar #Sync
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
STORE_JUMP(o0, f48, 44f) membar #Sync
2: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
STORE_JUMP(o0, f48, 52f) membar #Sync
3: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
STORE_JUMP(o0, f48, 60f) membar #Sync
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
STORE_JUMP(o0, f48, 45f) membar #Sync
2: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
STORE_JUMP(o0, f48, 53f) membar #Sync
3: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
STORE_JUMP(o0, f48, 61f) membar #Sync
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
STORE_JUMP(o0, f48, 46f) membar #Sync
2: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
STORE_JUMP(o0, f48, 54f) membar #Sync
3: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
STORE_JUMP(o0, f48, 62f) membar #Sync
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
LOOP_CHUNK1(o1, o0, o4, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
LOOP_CHUNK2(o1, o0, o4, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
LOOP_CHUNK3(o1, o0, o4, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
STORE_JUMP(o0, f48, 47f) membar #Sync
2: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
STORE_JUMP(o0, f48, 55f) membar #Sync
3: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f) membar #Sync
40: FINISH_VISCHUNK(o0, f0, f2, g3)
41: FINISH_VISCHUNK(o0, f2, f4, g3)
42: FINISH_VISCHUNK(o0, f4, f6, g3)
43: FINISH_VISCHUNK(o0, f6, f8, g3)
44: FINISH_VISCHUNK(o0, f8, f10, g3)
45: FINISH_VISCHUNK(o0, f10, f12, g3)
46: FINISH_VISCHUNK(o0, f12, f14, g3)
47: UNEVEN_VISCHUNK(o0, f14, f0, g3)
48: FINISH_VISCHUNK(o0, f16, f18, g3)
49: FINISH_VISCHUNK(o0, f18, f20, g3)
50: FINISH_VISCHUNK(o0, f20, f22, g3)
51: FINISH_VISCHUNK(o0, f22, f24, g3)
52: FINISH_VISCHUNK(o0, f24, f26, g3)
53: FINISH_VISCHUNK(o0, f26, f28, g3)
54: FINISH_VISCHUNK(o0, f28, f30, g3)
55: UNEVEN_VISCHUNK(o0, f30, f0, g3)
56: FINISH_VISCHUNK(o0, f32, f34, g3)
57: FINISH_VISCHUNK(o0, f34, f36, g3)
58: FINISH_VISCHUNK(o0, f36, f38, g3)
59: FINISH_VISCHUNK(o0, f38, f40, g3)
60: FINISH_VISCHUNK(o0, f40, f42, g3)
61: FINISH_VISCHUNK(o0, f42, f44, g3)
62: FINISH_VISCHUNK(o0, f44, f46, g3)
63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
93: EX_LD(LOAD(ldd, %o1, %f2))
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
EX_ST(STORE(std, %f8, %o0))
bl,pn %xcc, 95f
add %o0, 8, %o0
EX_LD(LOAD(ldd, %o1, %f0))
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
EX_ST(STORE(std, %f8, %o0))
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
1: EX_LD(LOAD(ldub, %o1, %o3))
add %o1, 1, %o1
subcc %o2, 1, %o2
EX_ST(STORE(stb, %o3, %o0))
bne,pt %xcc, 1b
add %o0, 1, %o0
2: membar #StoreLoad | #StoreStore
VISExit
retl
mov %g5, %o0
.align 64
70: /* 16 < len <= (5 * 64) */
bne,pn %XCC, 75f
sub %o0, %o1, %o3
72: andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
subcc %o4, 0x10, %o4
EX_ST(STORE(stx, %o5, %o1 + %o3))
add %o1, 0x8, %o1
EX_ST(STORE(stx, %g1, %o1 + %o3))
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
EX_LD(LOAD(ldx, %o1, %o5))
sub %o2, 0x8, %o2
EX_ST(STORE(stx, %o5, %o1 + %o3))
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
EX_LD(LOAD(lduw, %o1, %o5))
sub %o2, 0x4, %o2
EX_ST(STORE(stw, %o5, %o1 + %o3))
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
75: andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %o2, %g1, %o2
1: EX_LD(LOAD(ldub, %o1, %o5))
subcc %g1, 1, %g1
EX_ST(STORE(stb, %o5, %o1 + %o3))
bgu,pt %icc, 1b
add %o1, 1, %o1
2: add %o1, %o3, %o0
andcc %o1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %o2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, %o3
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1, %g2))
sub %o3, %g1, %o3
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
subcc %o4, 0x8, %o4
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
EX_ST(STORE(stx, %o5, %o0))
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
be,pn %icc, 85f
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, %o3
.align 64
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1: EX_LD(LOAD(lduw, %o1, %g1))
subcc %o2, 4, %o2
EX_ST(STORE(stw, %g1, %o1 + %o3))
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
mov %g5, %o0
.align 32
90: EX_LD(LOAD(ldub, %o1, %g1))
subcc %o2, 1, %o2
EX_ST(STORE(stb, %g1, %o1 + %o3))
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov %g5, %o0
@@ -3,410 +3,20 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/ */
#include <asm/visasm.h> #define EX_LD(x) \
#include <asm/asi.h> 98: x; \
#include <asm/dcu.h>
#include <asm/spitfire.h>
#define XCC xcc
#define EXNV_RAW(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: add %o1, %o3, %o0; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: add %o1, %o3, %o0; \
a, b, %o1; \
ba U3cfu_fixup; \
add %o1, 4, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV8(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: add %o1, %o3, %o0; \
a, b, %o1; \
ba U3cfu_fixup; \
add %o1, 8, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EX2(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
and %o2, (0x40 - 1), %o1; \
add %o1, %o4, %o1; \
ba U3cfu_fixup; \
add %o1, 0x1c0, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EX3(x,y) \
98: x,y; \
.section .fixup; \ .section .fixup; \
.align 4; \ .align 4; \
99: VISExitHalf; \ 99: retl; \
and %o2, (0x40 - 1), %o1; \ mov 1, %o0; \
sll %g3, 6, %g3; \
add %o1, 0x80, %o1; \
ba U3cfu_fixup; \
add %o1, %g3, %o1; \
.section __ex_table; \ .section __ex_table; \
.align 4; \ .align 4; \
.word 98b, 99b; \ .word 98b, 99b; \
.text; \ .text; \
.align 4; .align 4;
#define EX4(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
and %o2, (0x40 - 1), %o1; \
add %o1, 0x40, %o1; \
ba U3cfu_fixup; \
add %o1, %g3, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
.register %g2,#scratch
.register %g3,#scratch
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* 2) Only low 32 FPU registers are used so that only the
* lower half of the FPU register set is dirtied by this
* code. This is especially important in the kernel.
* 3) This code never prefetches cachelines past the end
* of the source buffer.
*/
.text
.align 32
/* The cheetah's flexible spine, oversized liver, enlarged heart,
* slender muscular body, and claws make it the swiftest hunter
* in Africa and the fastest animal on land. Can reach speeds
* of up to 2.4GB per second.
*/
.globl U3copy_from_user
U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
bleu,a,pn %XCC, 80f
or %o3, %o2, %o3
cmp %o2, 256
blu,pt %XCC, 70f
andcc %o3, 0x7, %g0
ba,pt %xcc, 1f
andcc %o0, 0x3f, %g2
/* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
1:
/* Is 'dst' already aligned on an 64-byte boundary? */
be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
sub %g2, 0x40, %g2
sub %g0, %g2, %g2
sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
1: EXNV_RAW(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
add %o1, 0x1, %o1
add %o0, 0x1, %o0
subcc %g2, 0x1, %g2
bg,pt %XCC, 1b
stb %o3, [%o0 + -1]
2: VISEntryHalf
and %o1, 0x7, %g1
ba,pt %xcc, 1f
alignaddr %o1, %g0, %o1
.align 64
1:
membar #StoreLoad | #StoreStore | #LoadStore
prefetcha [%o1 + 0x000] %asi, #one_read
prefetcha [%o1 + 0x040] %asi, #one_read
andn %o2, (0x40 - 1), %o4
prefetcha [%o1 + 0x080] %asi, #one_read
prefetcha [%o1 + 0x0c0] %asi, #one_read
EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0)
prefetcha [%o1 + 0x100] %asi, #one_read
EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0)
prefetcha [%o1 + 0x140] %asi, #one_read
EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0)
prefetcha [%o1 + 0x180] %asi, #one_read
faligndata %f0, %f2, %f16
EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0)
faligndata %f2, %f4, %f18
EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0)
faligndata %f4, %f6, %f20
EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0)
faligndata %f6, %f8, %f22
EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0)
faligndata %f8, %f10, %f24
EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0)
faligndata %f10, %f12, %f26
EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0)
sub %o4, 0x80, %o4
add %o1, 0x40, %o1
ba,pt %xcc, 1f
srl %o4, 6, %o3
.align 64
1:
EX3(ldda [%o1 + 0x008] %asi, %f2)
faligndata %f12, %f14, %f28
EX3(ldda [%o1 + 0x010] %asi, %f4)
faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P
EX3(ldda [%o1 + 0x018] %asi, %f6)
faligndata %f0, %f2, %f16
EX3(ldda [%o1 + 0x020] %asi, %f8)
faligndata %f2, %f4, %f18
EX3(ldda [%o1 + 0x028] %asi, %f10)
faligndata %f4, %f6, %f20
EX3(ldda [%o1 + 0x030] %asi, %f12)
faligndata %f6, %f8, %f22
EX3(ldda [%o1 + 0x038] %asi, %f14)
faligndata %f8, %f10, %f24
EX3(ldda [%o1 + 0x040] %asi, %f0)
prefetcha [%o1 + 0x180] %asi, #one_read
faligndata %f10, %f12, %f26
subcc %o3, 0x01, %o3
add %o1, 0x40, %o1
bg,pt %XCC, 1b
add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
EX3(ldda [%o1 + 0x008] %asi, %f2)
faligndata %f12, %f14, %f28
EX3(ldda [%o1 + 0x010] %asi, %f4)
faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P
EX3(ldda [%o1 + 0x018] %asi, %f6)
faligndata %f0, %f2, %f16
EX3(ldda [%o1 + 0x020] %asi, %f8)
faligndata %f2, %f4, %f18
EX3(ldda [%o1 + 0x028] %asi, %f10)
faligndata %f4, %f6, %f20
EX3(ldda [%o1 + 0x030] %asi, %f12)
faligndata %f6, %f8, %f22
EX3(ldda [%o1 + 0x038] %asi, %f14)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
EX4(ldda [%o1 + 0x040] %asi, %f0)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
* load past the end of the src buffer.
*/
and %o2, 0x3f, %o2
andcc %o2, 0x38, %g2
be,pn %XCC, 10f
subcc %g2, 0x8, %g2
be,pn %XCC, 10f
cmp %g1, 0
be,a,pt %XCC, 1f
EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0)
1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0)
add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
std %f8, [%o0 + 0x00]
be,pn %XCC, 10f
add %o0, 0x8, %o0
EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0)
add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
std %f8, [%o0 + 0x00]
bne,pn %XCC, 1b
add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
10:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
be,pn %XCC, 85f
sub %o0, %o1, %o3
andcc %g1, 0x7, %g0
bne,pn %icc, 90f
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
EXNV(ldxa [%o1] %asi, %o5, add %o2, %g0)
stx %o5, [%o1 + %o3]
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
EXNV(lduwa [%o1] %asi, %o5, and %o2, 0x7)
stw %o5, [%o1 + %o3]
add %o1, 0x4, %o1
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
EXNV(lduha [%o1] %asi, %o5, and %o2, 0x3)
sth %o5, [%o1 + %o3]
add %o1, 0x2, %o1
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
EXNV(lduba [%o1] %asi, %o5, and %o2, 0x1)
ba,pt %xcc, 85f
stb %o5, [%o1 + %o3]
70: /* 16 < len <= 64 */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
EXNV8(ldxa [%o1] %asi, %o5, add %o2, %o4)
stx %o5, [%o1 + %o3]
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
EXNV4(lduwa [%o1] %asi, %o5, add %o2, %g0)
stw %o5, [%o1 + %o3]
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1:
subcc %o2, 4, %o2
EXNV(lduwa [%o1] %asi, %g1, add %o2, %g0)
stw %g1, [%o1 + %o3]
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
clr %o0
.align 32
90:
subcc %o2, 1, %o2
EXNV(lduba [%o1] %asi, %g1, add %o2, %g0)
stb %g1, [%o1 + %o3]
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
clr %o0
U3cfu_fixup:
/* Since this is copy_from_user(), zero out the rest of the
* kernel buffer.
*/
cmp %o1, 0
ble,pn %icc, 2f
mov %o1, %g2
1: subcc %g2, 1, %g2 #define FUNC_NAME U3copy_from_user
stb %g0, [%o0] #define LOAD(type,addr,dest) type##a [addr] %asi, dest
bne,pt %icc, 1b #define EX_RETVAL(x) 0
add %o0, 1, %o0
2: retl #include "U3memcpy.S"
mov %o1, %o0
/* U3copy_to_user.S: UltraSparc-III optimized memcpy. /* U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
* *
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/ */
#include <asm/visasm.h> #define EX_ST(x) \
#include <asm/asi.h> 98: x; \
#include <asm/dcu.h>
#include <asm/spitfire.h>
#define XCC xcc
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \ .section .fixup; \
.align 4; \ .align 4; \
99: retl; \ 99: retl; \
a, b, %o0; \ mov 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV2(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV3(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 4, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 8, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
retl; \
a, b, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXBLK1(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
add %o4, 0x1c0, %o1; \
and %o2, (0x40 - 1), %o2; \
retl; \
add %o1, %o2, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXBLK2(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
sll %o3, 6, %o3; \
and %o2, (0x40 - 1), %o2; \
add %o3, 0x80, %o1; \
retl; \
add %o1, %o2, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXBLK3(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
and %o2, (0x40 - 1), %o2; \
retl; \
add %o2, 0x80, %o0; \
.section __ex_table; \ .section __ex_table; \
.align 4; \ .align 4; \
.word 98b, 99b; \ .word 98b, 99b; \
.text; \ .text; \
.align 4; .align 4;
#define EXBLK4(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: VISExitHalf; \
and %o2, (0x40 - 1), %o2; \
retl; \
add %o2, 0x40, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
.register %g2,#scratch
.register %g3,#scratch
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* 2) Only low 32 FPU registers are used so that only the
* lower half of the FPU register set is dirtied by this
* code. This is especially important in the kernel.
* 3) This code never prefetches cachelines past the end
* of the source buffer.
*/
.text #define FUNC_NAME U3copy_to_user
.align 32 #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
#define EX_RETVAL(x) 0
/* The cheetah's flexible spine, oversized liver, enlarged heart,
* slender muscular body, and claws make it the swiftest hunter
* in Africa and the fastest animal on land. Can reach speeds
* of up to 2.4GB per second.
*/
.globl U3copy_to_user
U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it. /* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively * Reading %asi to check for KERNEL_DS is comparatively
* cheap. * cheap.
*/ */
rd %asi, %g1 #define PREAMBLE \
cmp %g1, ASI_AIUS rd %asi, %g1; \
bne,pn %icc, U3memcpy_user_stub cmp %g1, ASI_AIUS; \
nop bne,pn %icc, memcpy_user_stub; \
nop; \
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
bleu,a,pn %XCC, 80f
or %o3, %o2, %o3
cmp %o2, 256
blu,pt %XCC, 70f
andcc %o3, 0x7, %g0
ba,pt %xcc, 1f
andcc %o0, 0x3f, %g2
/* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
1:
/* Is 'dst' already aligned on an 64-byte boundary? */
be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
sub %g2, 0x40, %g2
sub %g0, %g2, %g2
sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
1: ldub [%o1 + 0x00], %o3
add %o1, 0x1, %o1
add %o0, 0x1, %o0
subcc %g2, 0x1, %g2
bg,pt %XCC, 1b
EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
2: VISEntryHalf
and %o1, 0x7, %g1
ba,pt %xcc, 1f
alignaddr %o1, %g0, %o1
.align 64
1:
membar #StoreLoad | #StoreStore | #LoadStore
prefetch [%o1 + 0x000], #one_read
prefetch [%o1 + 0x040], #one_read
andn %o2, (0x40 - 1), %o4
prefetch [%o1 + 0x080], #one_read
prefetch [%o1 + 0x0c0], #one_read
ldd [%o1 + 0x000], %f0
prefetch [%o1 + 0x100], #one_read
ldd [%o1 + 0x008], %f2
prefetch [%o1 + 0x140], #one_read
ldd [%o1 + 0x010], %f4
prefetch [%o1 + 0x180], #one_read
faligndata %f0, %f2, %f16
ldd [%o1 + 0x018], %f6
faligndata %f2, %f4, %f18
ldd [%o1 + 0x020], %f8
faligndata %f4, %f6, %f20
ldd [%o1 + 0x028], %f10
faligndata %f6, %f8, %f22
ldd [%o1 + 0x030], %f12
faligndata %f8, %f10, %f24
ldd [%o1 + 0x038], %f14
faligndata %f10, %f12, %f26
ldd [%o1 + 0x040], %f0
sub %o4, 0x80, %o4
add %o1, 0x40, %o1
ba,pt %xcc, 1f
srl %o4, 6, %o3
.align 64
1:
ldd [%o1 + 0x008], %f2
faligndata %f12, %f14, %f28
ldd [%o1 + 0x010], %f4
faligndata %f14, %f0, %f30
EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
ldd [%o1 + 0x018], %f6
faligndata %f0, %f2, %f16
ldd [%o1 + 0x020], %f8
faligndata %f2, %f4, %f18
ldd [%o1 + 0x028], %f10
faligndata %f4, %f6, %f20
ldd [%o1 + 0x030], %f12
faligndata %f6, %f8, %f22
ldd [%o1 + 0x038], %f14
faligndata %f8, %f10, %f24
ldd [%o1 + 0x040], %f0
prefetch [%o1 + 0x180], #one_read
faligndata %f10, %f12, %f26
subcc %o3, 0x01, %o3
add %o1, 0x40, %o1
bg,pt %XCC, 1b
add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
ldd [%o1 + 0x008], %f2
faligndata %f12, %f14, %f28
ldd [%o1 + 0x010], %f4
faligndata %f14, %f0, %f30
EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
ldd [%o1 + 0x018], %f6
faligndata %f0, %f2, %f16
ldd [%o1 + 0x020], %f8
faligndata %f2, %f4, %f18
ldd [%o1 + 0x028], %f10
faligndata %f4, %f6, %f20
ldd [%o1 + 0x030], %f12
faligndata %f6, %f8, %f22
ldd [%o1 + 0x038], %f14
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
ldd [%o1 + 0x040], %f0
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
* load past the end of the src buffer.
*/
and %o2, 0x3f, %o2
andcc %o2, 0x38, %g2
be,pn %XCC, 2f
subcc %g2, 0x8, %g2
be,pn %XCC, 2f
cmp %g1, 0
be,a,pt %XCC, 1f
ldd [%o1 + 0x00], %f0
1: ldd [%o1 + 0x08], %f2
add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
ldd [%o1 + 0x08], %f0
add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
2:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
be,pn %XCC, 85f
sub %o0, %o1, %o3
andcc %g1, 0x7, %g0
bne,pn %icc, 90f
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
ldx [%o1], %o5
EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
lduw [%o1], %o5
EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
add %o1, 0x4, %o1
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
lduh [%o1], %o5
EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
add %o1, 0x2, %o1
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
ldub [%o1], %o5
ba,pt %xcc, 85f
EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)
70: /* 16 < len <= 64 */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
ldx [%o1], %o5
EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
lduw [%o1], %o5
EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1:
subcc %o2, 4, %o2
lduw [%o1], %g1
EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
clr %o0
.align 32 #include "U3memcpy.S"
90:
subcc %o2, 1, %o2
ldub [%o1], %g1
EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
clr %o0
@@ -6,14 +6,50 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/visasm.h> #include <asm/visasm.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#else #else
#define ASI_BLK_P 0xf0 #define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04 #define FPRS_FEF 0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif #endif
#endif
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef STORE_BLK
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
#endif
#ifndef FUNC_NAME
#define FUNC_NAME U3memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC #ifndef XCC
#define XCC xcc #define XCC xcc
@@ -33,7 +69,7 @@
*/ */
.text .text
.align 32 .align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart, /* The cheetah's flexible spine, oversized liver, enlarged heart,
* slender muscular body, and claws make it the swiftest hunter * slender muscular body, and claws make it the swiftest hunter
@@ -41,137 +77,157 @@
* of up to 2.4GB per second. * of up to 2.4GB per second.
*/ */
.globl U3memcpy .globl FUNC_NAME
U3memcpy: /* %o0=dst, %o1=src, %o2=len */ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
PREAMBLE
mov %o0, %g5 mov %o0, %g5
cmp %o2, 0 cmp %o2, 0
be,pn %XCC, 85f be,pn %XCC, 85f
or %o0, %o1, %o3 or %o0, %o1, %o3
cmp %o2, 16 cmp %o2, 16
bleu,a,pn %XCC, 70f blu,a,pn %XCC, 80f
or %o3, %o2, %o3 or %o3, %o2, %o3
cmp %o2, 256 cmp %o2, (3 * 64)
blu,pt %XCC, 80f blu,pt %XCC, 70f
andcc %o3, 0x7, %g0 andcc %o3, 0x7, %g0
ba,pt %xcc, 1f /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
andcc %o0, 0x3f, %g2 * o5 from here until we hit VISExitHalf.
/* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/ */
.align 64 VISEntryHalf
1:
/* Is 'dst' already aligned on an 64-byte boundary? */ /* Is 'dst' already aligned on an 64-byte boundary? */
andcc %o0, 0x3f, %g2
be,pt %XCC, 2f be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre- * of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'. * subtract this from 'len'.
*/ */
sub %o0, %o1, %o4
sub %g2, 0x40, %g2 sub %g2, 0x40, %g2
sub %g0, %g2, %g2 sub %g0, %g2, %g2
sub %o2, %g2, %o2 sub %o2, %g2, %o2
andcc %g2, 0x7, %g1
be,pt %icc, 2f
and %g2, 0x38, %g2
/* Copy %g2 bytes from src to dst, one byte at a time. */ 1: subcc %g1, 0x1, %g1
1: ldub [%o1 + 0x00], %o3 EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST(STORE(stb, %o3, %o1 + %o4))
bgu,pt %XCC, 1b
add %o1, 0x1, %o1 add %o1, 0x1, %o1
add %o0, 0x1, %o0
subcc %g2, 0x1, %g2
bg,pt %XCC, 1b add %o1, %o4, %o0
stb %o3, [%o0 + -1]
2: VISEntryHalf 2: cmp %g2, 0x0
and %o1, 0x7, %g1 and %o1, 0x7, %g1
ba,pt %xcc, 1f be,pt %icc, 3f
alignaddr %o1, %g0, %o1 alignaddr %o1, %g0, %o1
.align 64 EX_LD(LOAD(ldd, %o1, %f4))
1: 1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
membar #StoreLoad | #StoreStore | #LoadStore add %o1, 0x8, %o1
prefetch [%o1 + 0x000], #one_read subcc %g2, 0x8, %g2
prefetch [%o1 + 0x040], #one_read faligndata %f4, %f6, %f0
EX_ST(STORE(std, %f0, %o0))
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
EX_ST(STORE(std, %f2, %o0))
bne,pt %icc, 1b
add %o0, 0x8, %o0
3: LOAD(prefetch, %o1 + 0x000, #one_read)
LOAD(prefetch, %o1 + 0x040, #one_read)
andn %o2, (0x40 - 1), %o4 andn %o2, (0x40 - 1), %o4
prefetch [%o1 + 0x080], #one_read LOAD(prefetch, %o1 + 0x080, #one_read)
prefetch [%o1 + 0x0c0], #one_read LOAD(prefetch, %o1 + 0x0c0, #one_read)
ldd [%o1 + 0x000], %f0 LOAD(prefetch, %o1 + 0x100, #one_read)
prefetch [%o1 + 0x100], #one_read EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
ldd [%o1 + 0x008], %f2 LOAD(prefetch, %o1 + 0x140, #one_read)
prefetch [%o1 + 0x140], #one_read EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
ldd [%o1 + 0x010], %f4 LOAD(prefetch, %o1 + 0x180, #one_read)
prefetch [%o1 + 0x180], #one_read EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16 faligndata %f0, %f2, %f16
ldd [%o1 + 0x018], %f6 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f2, %f4, %f18 faligndata %f2, %f4, %f18
ldd [%o1 + 0x020], %f8 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f4, %f6, %f20 faligndata %f4, %f6, %f20
ldd [%o1 + 0x028], %f10 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f6, %f8, %f22 faligndata %f6, %f8, %f22
ldd [%o1 + 0x030], %f12 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f8, %f10, %f24 faligndata %f8, %f10, %f24
ldd [%o1 + 0x038], %f14 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f10, %f12, %f26 faligndata %f10, %f12, %f26
ldd [%o1 + 0x040], %f0 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
sub %o4, 0x80, %o4 subcc %o4, 0x80, %o4
add %o1, 0x40, %o1 add %o1, 0x40, %o1
ba,pt %xcc, 1f bgu,pt %XCC, 1f
srl %o4, 6, %o3 srl %o4, 6, %o3
ba,pt %xcc, 2f
nop
.align 64 .align 64
1: 1:
ldd [%o1 + 0x008], %f2 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28 faligndata %f12, %f14, %f28
ldd [%o1 + 0x010], %f4 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30 faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P EX_ST(STORE_BLK(%f16, %o0))
ldd [%o1 + 0x018], %f6 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16 faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
ldd [%o1 + 0x020], %f8 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18 faligndata %f2, %f4, %f18
ldd [%o1 + 0x028], %f10 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20 faligndata %f4, %f6, %f20
ldd [%o1 + 0x030], %f12 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22 faligndata %f6, %f8, %f22
ldd [%o1 + 0x038], %f14 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f8, %f10, %f24
ldd [%o1 + 0x040], %f0 faligndata %f8, %f10, %f24
prefetch [%o1 + 0x180], #one_read EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26 faligndata %f10, %f12, %f26
subcc %o3, 0x01, %o3
add %o1, 0x40, %o1
bg,pt %XCC, 1b bg,pt %XCC, 1b
add %o0, 0x40, %o0 add %o1, 0x40, %o1
/* Finally we copy the last full 64-byte block. */ /* Finally we copy the last full 64-byte block. */
ldd [%o1 + 0x008], %f2 2:
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28 faligndata %f12, %f14, %f28
ldd [%o1 + 0x010], %f4 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30 faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P EX_ST(STORE_BLK(%f16, %o0))
ldd [%o1 + 0x018], %f6 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16 faligndata %f0, %f2, %f16
ldd [%o1 + 0x020], %f8 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18 faligndata %f2, %f4, %f18
ldd [%o1 + 0x028], %f10 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20 faligndata %f4, %f6, %f20
ldd [%o1 + 0x030], %f12 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f6, %f8, %f22 faligndata %f6, %f8, %f22
ldd [%o1 + 0x038], %f14 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f8, %f10, %f24 faligndata %f8, %f10, %f24
cmp %g1, 0 cmp %g1, 0
be,pt %XCC, 1f be,pt %XCC, 1f
add %o0, 0x40, %o0 add %o0, 0x40, %o0
ldd [%o1 + 0x040], %f0 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
1: faligndata %f10, %f12, %f26 1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28 faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30 faligndata %f14, %f0, %f30
stda %f16, [%o0] ASI_BLK_P EX_ST(STORE_BLK(%f16, %o0))
add %o0, 0x40, %o0 add %o0, 0x40, %o0
add %o1, 0x40, %o1 add %o1, 0x40, %o1
membar #Sync membar #Sync
@@ -189,23 +245,22 @@ U3memcpy: /* %o0=dst, %o1=src, %o2=len */
be,pn %XCC, 2f be,pn %XCC, 2f
cmp %g1, 0 cmp %g1, 0
sub %o2, %g2, %o2
be,a,pt %XCC, 1f be,a,pt %XCC, 1f
ldd [%o1 + 0x00], %f0 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
1: ldd [%o1 + 0x08], %f2 1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
add %o1, 0x8, %o1 add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2 subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8 faligndata %f0, %f2, %f8
std %f8, [%o0 + 0x00] EX_ST(STORE(std, %f8, %o0))
be,pn %XCC, 2f be,pn %XCC, 2f
add %o0, 0x8, %o0 add %o0, 0x8, %o0
ldd [%o1 + 0x08], %f0 EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
add %o1, 0x8, %o1 add %o1, 0x8, %o1
sub %o2, 0x8, %o2
subcc %g2, 0x8, %g2 subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8 faligndata %f2, %f0, %f8
std %f8, [%o0 + 0x00] EX_ST(STORE(std, %f8, %o0))
bne,pn %XCC, 1b bne,pn %XCC, 1b
add %o0, 0x8, %o0 add %o0, 0x8, %o0
@@ -225,48 +280,60 @@ U3memcpy: /* %o0=dst, %o1=src, %o2=len */
andcc %o2, 0x8, %g0 andcc %o2, 0x8, %g0
be,pt %icc, 1f be,pt %icc, 1f
nop nop
ldx [%o1], %o5 EX_LD(LOAD(ldx, %o1, %o5))
stx %o5, [%o1 + %o3] EX_ST(STORE(stx, %o5, %o1 + %o3))
add %o1, 0x8, %o1 add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0 1: andcc %o2, 0x4, %g0
be,pt %icc, 1f be,pt %icc, 1f
nop nop
lduw [%o1], %o5 EX_LD(LOAD(lduw, %o1, %o5))
stw %o5, [%o1 + %o3] EX_ST(STORE(stw, %o5, %o1 + %o3))
add %o1, 0x4, %o1 add %o1, 0x4, %o1
1: andcc %o2, 0x2, %g0 1: andcc %o2, 0x2, %g0
be,pt %icc, 1f be,pt %icc, 1f
nop nop
lduh [%o1], %o5 EX_LD(LOAD(lduh, %o1, %o5))
sth %o5, [%o1 + %o3] EX_ST(STORE(sth, %o5, %o1 + %o3))
add %o1, 0x2, %o1 add %o1, 0x2, %o1
1: andcc %o2, 0x1, %g0 1: andcc %o2, 0x1, %g0
be,pt %icc, 85f be,pt %icc, 85f
nop nop
ldub [%o1], %o5 EX_LD(LOAD(ldub, %o1, %o5))
ba,pt %xcc, 85f ba,pt %xcc, 85f
stb %o5, [%o1 + %o3] EX_ST(STORE(stb, %o5, %o1 + %o3))
.align 64
70: /* 16 < len <= 64 */ 70: /* 16 < len <= 64 */
bne,pn %XCC, 90f bne,pn %XCC, 75f
sub %o0, %o1, %o3 sub %o0, %o1, %o3
andn %o2, 0x7, %o4 72:
and %o2, 0x7, %o2 andn %o2, 0xf, %o4
1: subcc %o4, 0x8, %o4 and %o2, 0xf, %o2
ldx [%o1], %o5 1: subcc %o4, 0x10, %o4
stx %o5, [%o1 + %o3] EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
EX_ST(STORE(stx, %o5, %o1 + %o3))
add %o1, 0x8, %o1
EX_ST(STORE(stx, %g1, %o1 + %o3))
bgu,pt %XCC, 1b bgu,pt %XCC, 1b
add %o1, 0x8, %o1 add %o1, 0x8, %o1
andcc %o2, 0x4, %g0 73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
EX_LD(LOAD(ldx, %o1, %o5))
EX_ST(STORE(stx, %o5, %o1 + %o3))
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f be,pt %XCC, 1f
nop nop
sub %o2, 0x4, %o2 sub %o2, 0x4, %o2
lduw [%o1], %o5 EX_LD(LOAD(lduw, %o1, %o5))
stw %o5, [%o1 + %o3] EX_ST(STORE(stw, %o5, %o1 + %o3))
add %o1, 0x4, %o1 add %o1, 0x4, %o1
1: cmp %o2, 0 1: cmp %o2, 0
be,pt %XCC, 85f be,pt %XCC, 85f
@@ -274,6 +341,53 @@ U3memcpy: /* %o0=dst, %o1=src, %o2=len */
ba,pt %xcc, 90f ba,pt %xcc, 90f
nop nop
75:
andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
EX_LD(LOAD(ldub, %o1, %o5))
EX_ST(STORE(stb, %o5, %o1 + %o3))
bgu,pt %icc, 1b
add %o1, 1, %o1
2: add %o1, %o3, %o0
andcc %o1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %o2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, %o3
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1, %g2))
sub %o3, %g1, %o3
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
subcc %o4, 0x8, %o4
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
EX_ST(STORE(stx, %o5, %o0))
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
be,pn %icc, 85f
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, %o3
.align 64
80: /* 0 < len <= 16 */ 80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0 andcc %o3, 0x3, %g0
bne,pn %XCC, 90f bne,pn %XCC, 90f
@@ -281,34 +395,20 @@ U3memcpy: /* %o0=dst, %o1=src, %o2=len */
1: 1:
subcc %o2, 4, %o2 subcc %o2, 4, %o2
lduw [%o1], %g1 EX_LD(LOAD(lduw, %o1, %g1))
stw %g1, [%o1 + %o3] EX_ST(STORE(stw, %g1, %o1 + %o3))
bgu,pt %XCC, 1b bgu,pt %XCC, 1b
add %o1, 4, %o1 add %o1, 4, %o1
85: retl 85: retl
mov %g5, %o0 mov EX_RETVAL(%g5), %o0
.align 32 .align 32
90: 90:
subcc %o2, 1, %o2 subcc %o2, 1, %o2
ldub [%o1], %g1 EX_LD(LOAD(ldub, %o1, %g1))
stb %g1, [%o1 + %o3] EX_ST(STORE(stb, %g1, %o1 + %o3))
bgu,pt %XCC, 90b bgu,pt %XCC, 90b
add %o1, 1, %o1 add %o1, 1, %o1
retl retl
mov %g5, %o0 mov EX_RETVAL(%g5), %o0
/* Act like copy_{to,in}_user(), ie. return zero instead
* of original destination pointer. This is invoked when
* copy_{to,in}_user() finds that %asi is kernel space.
*/
.globl U3memcpy_user_stub
U3memcpy_user_stub:
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
call U3memcpy
mov %i2, %o2
ret
restore %g0, %g0, %o0
/* U3patch.S: Patch Ultra-I routines with Ultra-III variant.
*
* Copyright (C) 2004 David S. Miller <davem@redhat.com>
*/
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define ULTRA3_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
srl %g1, 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl cheetah_patch_copyops
cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy)
ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
retl
nop
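The ULTRA3_DO_PATCH macro above rewrites the first two instructions of each generic entry point: the first becomes an unconditional branch whose PC-relative word displacement targets the Ultra-III routine, the second becomes a nop, and the final flush synchronizes the instruction cache.  A hedged C sketch of the same arithmetic (illustrative only; the function name and signature here are invented, and the real patching is the assembly above):

	#include <stdint.h>

	#define BRANCH_ALWAYS	0x10680000	/* unconditional-branch instruction template */
	#define NOP		0x01000000

	/* Overwrite 'old' so that calling it branches straight to 'new'.
	 * The displacement field holds (new - old) / 4, exactly as the
	 * sub/srl/or sequence in ULTRA3_DO_PATCH computes it. */
	static void ultra3_do_patch(uint32_t *old, uint32_t *new)
	{
		uint32_t disp = (uint32_t)(((uintptr_t)new - (uintptr_t)old) >> 2);

		old[0] = BRANCH_ALWAYS | disp;
		old[1] = NOP;
		/* the real macro then issues "flush" on the patched address */
	}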
/* $Id: VIScopy.S,v 1.27 2002/02/09 19:49:30 davem Exp $
* VIScopy.S: High speed copy operations utilizing the UltraSparc
* Visual Instruction Set.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include "VIS.h"
/* VIS code can be used for numerous copy/set operation variants.
* It can be made to work in the kernel, one single instance,
* for all of memcpy, copy_to_user, and copy_from_user by setting
* the ASI src/dest globals correctly. Furthermore it can
* be used for kernel-->kernel page copies as well, a hook label
* is put in here just for this purpose.
*
* For userland, compiling this without __KERNEL__ defined makes
* it work just fine as a generic libc bcopy and memcpy.
* If for userland it is compiled with a 32bit gcc (but you need
* -Wa,-Av9a for as), the code will just rely on lower 32bits of
* IEU registers, if you compile it with 64bit gcc (ie. define
* __sparc_v9__), the code will use full 64bit.
*/
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/thread_info.h>
#define FPU_CLEAN_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define FPU_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define NORMAL_RETL \
ldub [%g6 + TI_CURRENT_DS], %o1; \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: ba VIScopyfixup_ret; \
a, b, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EX2(x,y,c,d,e,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: c, d, e; \
ba VIScopyfixup_ret; \
a, b, %o1; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXO2(x,y) \
98: x,y; \
.section __ex_table; \
.align 4; \
.word 98b, VIScopyfixup_reto2; \
.text; \
.align 4;
#define EXVISN(x,y,n) \
98: x,y; \
.section __ex_table; \
.align 4; \
.word 98b, VIScopyfixup_vis##n; \
.text; \
.align 4;
#define EXT(start,end,handler) \
.section __ex_table; \
.align 4; \
.word start, 0, end, handler; \
.text; \
.align 4;
#else
#ifdef REGS_64BIT
#define FPU_CLEAN_RETL \
retl; \
mov %g6, %o0;
#define FPU_RETL \
retl; \
mov %g6, %o0;
#else
#define FPU_CLEAN_RETL \
wr %g0, FPRS_FEF, %fprs; \
retl; \
mov %g6, %o0;
#define FPU_RETL \
wr %g0, FPRS_FEF, %fprs; \
retl; \
mov %g6, %o0;
#endif
#define NORMAL_RETL \
retl; \
mov %g6, %o0;
#define EX(x,y,a,b) x,y
#define EX2(x,y,c,d,e,a,b) x,y
#define EXO2(x,y) x,y
#define EXVISN(x,y,n) x,y
#define EXT(a,b,c)
#endif
#define EXVIS(x,y) EXVISN(x,y,0)
#define EXVIS1(x,y) EXVISN(x,y,1)
#define EXVIS2(x,y) EXVISN(x,y,2)
#define EXVIS3(x,y) EXVISN(x,y,3)
#define EXVIS4(x,y) EXVISN(x,y,4)
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
faligndata %f1, %f2, %f48; \
faligndata %f2, %f3, %f50; \
faligndata %f3, %f4, %f52; \
faligndata %f4, %f5, %f54; \
faligndata %f5, %f6, %f56; \
faligndata %f6, %f7, %f58; \
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
EXVIS(LDBLK [%src] ASIBLK, %fdest); \
ASI_SETDST_BLK \
EXVIS(STBLK %fsrc, [%dest] ASIBLK); \
add %src, 0x40, %src; \
subcc %len, 0x40, %len; \
be,pn %xcc, jmptgt; \
add %dest, 0x40, %dest; \
ASI_SETSRC_BLK
#define LOOP_CHUNK1(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
#define STORE_SYNC(dest, fsrc) \
EXVIS(STBLK %fsrc, [%dest] ASIBLK); \
add %dest, 0x40, %dest;
#ifdef __KERNEL__
#define STORE_JUMP(dest, fsrc, target) \
srl asi_dest, 3, %g5; \
EXVIS2(STBLK %fsrc, [%dest] ASIBLK); \
xor asi_dest, ASI_BLK_XOR1, asi_dest;\
add %dest, 0x40, %dest; \
xor asi_dest, %g5, asi_dest; \
ba,pt %xcc, target;
#else
#define STORE_JUMP(dest, fsrc, target) \
EXVIS2(STBLK %fsrc, [%dest] ASIBLK); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target;
#endif
#ifndef __KERNEL__
#define VISLOOP_PAD nop; nop; nop; nop; \
nop; nop; nop; nop; \
nop; nop; nop; nop; \
nop; nop; nop;
#else
#define VISLOOP_PAD
#endif
#define FINISH_VISCHUNK(dest, f0, f1, left) \
ASI_SETDST_NOBLK \
subcc %left, 8, %left; \
bl,pn %xcc, vis_out; \
faligndata %f0, %f1, %f48; \
EXVIS3(STDF %f48, [%dest] ASINORMAL); \
add %dest, 8, %dest;
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
subcc %left, 8, %left; \
bl,pn %xcc, vis_out; \
fsrc1 %f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
ba,a,pt %xcc, vis_out_slk;
/* Macros for non-VIS memcpy code. */
#ifdef REGS_64BIT
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
ASI_SETSRC_NOBLK \
LDX [%src + offset + 0x00] ASINORMAL, %t0; \
LDX [%src + offset + 0x08] ASINORMAL, %t1; \
LDX [%src + offset + 0x10] ASINORMAL, %t2; \
LDX [%src + offset + 0x18] ASINORMAL, %t3; \
ASI_SETDST_NOBLK \
STW %t0, [%dst + offset + 0x04] ASINORMAL; \
srlx %t0, 32, %t0; \
STW %t0, [%dst + offset + 0x00] ASINORMAL; \
STW %t1, [%dst + offset + 0x0c] ASINORMAL; \
srlx %t1, 32, %t1; \
STW %t1, [%dst + offset + 0x08] ASINORMAL; \
STW %t2, [%dst + offset + 0x14] ASINORMAL; \
srlx %t2, 32, %t2; \
STW %t2, [%dst + offset + 0x10] ASINORMAL; \
STW %t3, [%dst + offset + 0x1c] ASINORMAL; \
srlx %t3, 32, %t3; \
STW %t3, [%dst + offset + 0x18] ASINORMAL;
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
ASI_SETSRC_NOBLK \
LDX [%src + offset + 0x00] ASINORMAL, %t0; \
LDX [%src + offset + 0x08] ASINORMAL, %t1; \
LDX [%src + offset + 0x10] ASINORMAL, %t2; \
LDX [%src + offset + 0x18] ASINORMAL, %t3; \
ASI_SETDST_NOBLK \
STX %t0, [%dst + offset + 0x00] ASINORMAL; \
STX %t1, [%dst + offset + 0x08] ASINORMAL; \
STX %t2, [%dst + offset + 0x10] ASINORMAL; \
STX %t3, [%dst + offset + 0x18] ASINORMAL; \
ASI_SETSRC_NOBLK \
LDX [%src + offset + 0x20] ASINORMAL, %t0; \
LDX [%src + offset + 0x28] ASINORMAL, %t1; \
LDX [%src + offset + 0x30] ASINORMAL, %t2; \
LDX [%src + offset + 0x38] ASINORMAL, %t3; \
ASI_SETDST_NOBLK \
STX %t0, [%dst + offset + 0x20] ASINORMAL; \
STX %t1, [%dst + offset + 0x28] ASINORMAL; \
STX %t2, [%dst + offset + 0x30] ASINORMAL; \
STX %t3, [%dst + offset + 0x38] ASINORMAL;
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
ASI_SETSRC_NOBLK \
LDX [%src - offset - 0x10] ASINORMAL, %t0; \
LDX [%src - offset - 0x08] ASINORMAL, %t1; \
ASI_SETDST_NOBLK \
STW %t0, [%dst - offset - 0x0c] ASINORMAL; \
srlx %t0, 32, %t2; \
STW %t2, [%dst - offset - 0x10] ASINORMAL; \
STW %t1, [%dst - offset - 0x04] ASINORMAL; \
srlx %t1, 32, %t3; \
STW %t3, [%dst - offset - 0x08] ASINORMAL;
#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
ASI_SETSRC_NOBLK \
LDX [%src - offset - 0x10] ASINORMAL, %t0; \
LDX [%src - offset - 0x08] ASINORMAL, %t1; \
ASI_SETDST_NOBLK \
STX %t0, [%dst - offset - 0x10] ASINORMAL; \
STX %t1, [%dst - offset - 0x08] ASINORMAL;
#else /* !REGS_64BIT */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
lduw [%src + offset + 0x00], %t0; \
lduw [%src + offset + 0x04], %t1; \
lduw [%src + offset + 0x08], %t2; \
lduw [%src + offset + 0x0c], %t3; \
stw %t0, [%dst + offset + 0x00]; \
stw %t1, [%dst + offset + 0x04]; \
stw %t2, [%dst + offset + 0x08]; \
stw %t3, [%dst + offset + 0x0c]; \
lduw [%src + offset + 0x10], %t0; \
lduw [%src + offset + 0x14], %t1; \
lduw [%src + offset + 0x18], %t2; \
lduw [%src + offset + 0x1c], %t3; \
stw %t0, [%dst + offset + 0x10]; \
stw %t1, [%dst + offset + 0x14]; \
stw %t2, [%dst + offset + 0x18]; \
stw %t3, [%dst + offset + 0x1c];
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
lduw [%src - offset - 0x10], %t0; \
lduw [%src - offset - 0x0c], %t1; \
lduw [%src - offset - 0x08], %t2; \
lduw [%src - offset - 0x04], %t3; \
stw %t0, [%dst - offset - 0x10]; \
stw %t1, [%dst - offset - 0x0c]; \
stw %t2, [%dst - offset - 0x08]; \
stw %t3, [%dst - offset - 0x04];
#endif /* !REGS_64BIT */
#ifdef __KERNEL__
.section __ex_table,#alloc
.section .fixup,#alloc,#execinstr
#endif
.text
.align 32
.globl memcpy
.type memcpy,@function
.globl bcopy
.type bcopy,@function
#ifdef __KERNEL__
memcpy_private:
memcpy: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
retl
clr %o0
.align 32
.globl __copy_from_user
.type __copy_from_user,@function
__copy_from_user:rd %asi, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
.globl __copy_to_user
.type __copy_to_user,@function
__copy_to_user: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
rd %asi, asi_dest ! IEU1
retl ! CTI Group
clr %o0 ! IEU0 Group
.globl __copy_in_user
.type __copy_in_user,@function
__copy_in_user: rd %asi, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov asi_src, asi_dest ! IEU1
retl ! CTI Group
clr %o0 ! IEU0 Group
#endif
bcopy: or %o0, 0, %g3 ! IEU0 Group
addcc %o1, 0, %o0 ! IEU1
brgez,pt %o2, memcpy_private ! CTI
or %g3, 0, %o1 ! IEU0 Group
retl ! CTI Group brk forced
clr %o0 ! IEU0
#ifdef __KERNEL__
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define ULTRA3_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
srl %g1, 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl cheetah_patch_copyops
cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy)
ULTRA3_DO_PATCH(__copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(__copy_to_user, U3copy_to_user)
ULTRA3_DO_PATCH(__copy_in_user, U3copy_in_user)
retl
nop
#undef BRANCH_ALWAYS
#undef NOP
#undef ULTRA3_DO_PATCH
#endif /* __KERNEL__ */
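For reference, a rough C model of what ULTRA3_DO_PATCH does (the helper name and types below are illustrative, not part of this patch): the first two instructions of the generic routine are rewritten into "ba,pt %xcc, <Ultra-III routine>; nop", assuming the word displacement fits the 19-bit field of the BPcc branch format.

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000u	/* "ba,pt %xcc, ." instruction template */
#define NOP		0x01000000u

static void patch_branch(uint32_t *old_insn, const uint32_t *new_insn)
{
	/* Byte delta between the two routines, converted to a word
	 * displacement just as the assembly does with "srl %g1, 2, %g1". */
	uint32_t disp = (uint32_t)((const char *)new_insn -
				   (const char *)old_insn) >> 2;

	old_insn[0] = BRANCH_ALWAYS | disp;	/* branch to the U3 version */
	old_insn[1] = NOP;			/* fill the delay slot */
	/* The assembly follows this with "flush %g2" so the rewritten
	 * text becomes visible to the instruction cache. */
}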
.align 32
#ifdef __KERNEL__
andcc %o0, 7, %g2 ! IEU1 Group
#endif
VIS_enter:
be,pt %xcc, dest_is_8byte_aligned ! CTI
#ifdef __KERNEL__
nop ! IEU0 Group
#else
andcc %o0, 0x38, %g5 ! IEU1 Group
#endif
do_dest_8byte_align:
mov 8, %g1 ! IEU0
sub %g1, %g2, %g2 ! IEU0 Group
andcc %o0, 1, %g0 ! IEU1
be,pt %icc, 2f ! CTI
sub %o2, %g2, %o2 ! IEU0 Group
1: ASI_SETSRC_NOBLK ! LSU Group
EX(LDUB [%o1] ASINORMAL, %o5,
add %o2, %g2) ! Load Group
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
subcc %g2, 1, %g2 ! IEU1 Group
be,pn %xcc, 3f ! CTI
EX2(STB %o5, [%o0 - 1] ASINORMAL,
add %g2, 1, %g2,
add %o2, %g2) ! Store
2: ASI_SETSRC_NOBLK ! LSU Group
EX(LDUB [%o1] ASINORMAL, %o5,
add %o2, %g2) ! Load Group
add %o0, 2, %o0 ! IEU0
EX2(LDUB [%o1 + 1] ASINORMAL, %g3,
sub %o0, 2, %o0,
add %o2, %g2) ! Load Group
ASI_SETDST_NOBLK ! LSU Group
subcc %g2, 2, %g2 ! IEU1 Group
EX2(STB %o5, [%o0 - 2] ASINORMAL,
add %g2, 2, %g2,
add %o2, %g2) ! Store
add %o1, 2, %o1 ! IEU0
bne,pt %xcc, 2b ! CTI Group
EX2(STB %g3, [%o0 - 1] ASINORMAL,
add %g2, 1, %g2,
add %o2, %g2) ! Store
#ifdef __KERNEL__
3:
dest_is_8byte_aligned:
VISEntry
andcc %o0, 0x38, %g5 ! IEU1 Group
#else
3: andcc %o0, 0x38, %g5 ! IEU1 Group
dest_is_8byte_aligned:
#endif
be,pt %icc, dest_is_64byte_aligned ! CTI
mov 64, %g1 ! IEU0
fmovd %f0, %f2 ! FPU
sub %g1, %g5, %g5 ! IEU0 Group
ASI_SETSRC_NOBLK ! LSU Group
alignaddr %o1, %g0, %g1 ! GRU Group
EXO2(LDDF [%g1] ASINORMAL, %f4) ! Load Group
sub %o2, %g5, %o2 ! IEU0
1: EX(LDDF [%g1 + 0x8] ASINORMAL, %f6,
add %o2, %g5) ! Load Group
add %g1, 0x8, %g1 ! IEU0 Group
subcc %g5, 8, %g5 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f4, %f6, %f0 ! GRU Group
EX2(STDF %f0, [%o0] ASINORMAL,
add %g5, 8, %g5,
add %o2, %g5) ! Store
add %o1, 8, %o1 ! IEU0 Group
be,pn %xcc, dest_is_64byte_aligned ! CTI
add %o0, 8, %o0 ! IEU1
ASI_SETSRC_NOBLK ! LSU Group
EX(LDDF [%g1 + 0x8] ASINORMAL, %f4,
add %o2, %g5) ! Load Group
add %g1, 8, %g1 ! IEU0
subcc %g5, 8, %g5 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f6, %f4, %f0 ! GRU Group
EX2(STDF %f0, [%o0] ASINORMAL,
add %g5, 8, %g5,
add %o2, %g5) ! Store
add %o1, 8, %o1 ! IEU0
ASI_SETSRC_NOBLK ! LSU Group
bne,pt %xcc, 1b ! CTI Group
add %o0, 8, %o0 ! IEU0
dest_is_64byte_aligned:
membar #LoadStore | #StoreStore | #StoreLoad ! LSU Group
#ifndef __KERNEL__
wr %g0, ASI_BLK_P, %asi ! LSU Group
#endif
subcc %o2, 0x40, %g7 ! IEU1 Group
mov %o1, %g1 ! IEU0
andncc %g7, (0x40 - 1), %g7 ! IEU1 Group
srl %g1, 3, %g2 ! IEU0
sub %o2, %g7, %g3 ! IEU0 Group
andn %o1, (0x40 - 1), %o1 ! IEU1
and %g2, 7, %g2 ! IEU0 Group
andncc %g3, 0x7, %g3 ! IEU1
fmovd %f0, %f2 ! FPU
sub %g3, 0x10, %g3 ! IEU0 Group
sub %o2, %g7, %o2 ! IEU1
#ifdef __KERNEL__
or asi_src, ASI_BLK_OR, asi_src ! IEU0 Group
or asi_dest, ASI_BLK_OR, asi_dest ! IEU1
#endif
alignaddr %g1, %g0, %g0 ! GRU Group
add %g1, %g7, %g1 ! IEU0 Group
subcc %o2, %g3, %o2 ! IEU1
ASI_SETSRC_BLK ! LSU Group
EXVIS1(LDBLK [%o1 + 0x00] ASIBLK, %f0) ! LSU Group
add %g1, %g3, %g1 ! IEU0
EXVIS1(LDBLK [%o1 + 0x40] ASIBLK, %f16) ! LSU Group
sub %g7, 0x80, %g7 ! IEU0
EXVIS(LDBLK [%o1 + 0x80] ASIBLK, %f32) ! LSU Group
#ifdef __KERNEL__
vispc: sll %g2, 9, %g2 ! IEU0 Group
sethi %hi(vis00), %g5 ! IEU1
or %g5, %lo(vis00), %g5 ! IEU0 Group
jmpl %g5 + %g2, %g0 ! CTI Group brk forced
addcc %o1, 0xc0, %o1 ! IEU1 Group
#else
! Clk1 Group 8-(
! Clk2 Group 8-(
! Clk3 Group 8-(
! Clk4 Group 8-(
vispc: rd %pc, %g5 ! PDU Group 8-(
addcc %g5, %lo(vis00 - vispc), %g5 ! IEU1 Group
sll %g2, 9, %g2 ! IEU0
jmpl %g5 + %g2, %g0 ! CTI Group brk forced
addcc %o1, 0xc0, %o1 ! IEU1 Group
#endif
.align 512 /* OK, here comes the fun part... */
vis00:FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) LOOP_CHUNK1(o1, o0, g7, vis01)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) LOOP_CHUNK2(o1, o0, g7, vis02)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) LOOP_CHUNK3(o1, o0, g7, vis03)
b,pt %xcc, vis00+4; faligndata %f0, %f2, %f48
vis01:FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_JUMP(o0, f48, finish_f0) membar #Sync
vis02:FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_JUMP(o0, f48, finish_f16) membar #Sync
vis03:FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_JUMP(o0, f48, finish_f32) membar #Sync
VISLOOP_PAD
vis10:FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) LOOP_CHUNK1(o1, o0, g7, vis11)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) LOOP_CHUNK2(o1, o0, g7, vis12)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) LOOP_CHUNK3(o1, o0, g7, vis13)
b,pt %xcc, vis10+4; faligndata %f2, %f4, %f48
vis11:FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_JUMP(o0, f48, finish_f2) membar #Sync
vis12:FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_JUMP(o0, f48, finish_f18) membar #Sync
vis13:FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_JUMP(o0, f48, finish_f34) membar #Sync
VISLOOP_PAD
vis20:FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) LOOP_CHUNK1(o1, o0, g7, vis21)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) LOOP_CHUNK2(o1, o0, g7, vis22)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) LOOP_CHUNK3(o1, o0, g7, vis23)
b,pt %xcc, vis20+4; faligndata %f4, %f6, %f48
vis21:FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_JUMP(o0, f48, finish_f4) membar #Sync
vis22:FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_JUMP(o0, f48, finish_f20) membar #Sync
vis23:FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_JUMP(o0, f48, finish_f36) membar #Sync
VISLOOP_PAD
vis30:FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) LOOP_CHUNK1(o1, o0, g7, vis31)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) LOOP_CHUNK2(o1, o0, g7, vis32)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) LOOP_CHUNK3(o1, o0, g7, vis33)
b,pt %xcc, vis30+4; faligndata %f6, %f8, %f48
vis31:FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_JUMP(o0, f48, finish_f6) membar #Sync
vis32:FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_JUMP(o0, f48, finish_f22) membar #Sync
vis33:FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_JUMP(o0, f48, finish_f38) membar #Sync
VISLOOP_PAD
vis40:FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) LOOP_CHUNK1(o1, o0, g7, vis41)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) LOOP_CHUNK2(o1, o0, g7, vis42)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) LOOP_CHUNK3(o1, o0, g7, vis43)
b,pt %xcc, vis40+4; faligndata %f8, %f10, %f48
vis41:FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_JUMP(o0, f48, finish_f8) membar #Sync
vis42:FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_JUMP(o0, f48, finish_f24) membar #Sync
vis43:FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_JUMP(o0, f48, finish_f40) membar #Sync
VISLOOP_PAD
vis50:FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) LOOP_CHUNK1(o1, o0, g7, vis51)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) LOOP_CHUNK2(o1, o0, g7, vis52)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) LOOP_CHUNK3(o1, o0, g7, vis53)
b,pt %xcc, vis50+4; faligndata %f10, %f12, %f48
vis51:FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_JUMP(o0, f48, finish_f10) membar #Sync
vis52:FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_JUMP(o0, f48, finish_f26) membar #Sync
vis53:FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_JUMP(o0, f48, finish_f42) membar #Sync
VISLOOP_PAD
vis60:FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) LOOP_CHUNK1(o1, o0, g7, vis61)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) LOOP_CHUNK2(o1, o0, g7, vis62)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) LOOP_CHUNK3(o1, o0, g7, vis63)
b,pt %xcc, vis60+4; faligndata %f12, %f14, %f48
vis61:FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_JUMP(o0, f48, finish_f12) membar #Sync
vis62:FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_JUMP(o0, f48, finish_f28) membar #Sync
vis63:FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_JUMP(o0, f48, finish_f44) membar #Sync
VISLOOP_PAD
vis70:FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) LOOP_CHUNK1(o1, o0, g7, vis71)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) LOOP_CHUNK2(o1, o0, g7, vis72)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) LOOP_CHUNK3(o1, o0, g7, vis73)
b,pt %xcc, vis70+4; faligndata %f14, %f16, %f48
vis71:FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_JUMP(o0, f48, finish_f14) membar #Sync
vis72:FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_JUMP(o0, f48, finish_f30) membar #Sync
vis73:FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_SYNC(o0, f48) membar #Sync
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_JUMP(o0, f48, finish_f46) membar #Sync
VISLOOP_PAD
finish_f0: FINISH_VISCHUNK(o0, f0, f2, g3)
finish_f2: FINISH_VISCHUNK(o0, f2, f4, g3)
finish_f4: FINISH_VISCHUNK(o0, f4, f6, g3)
finish_f6: FINISH_VISCHUNK(o0, f6, f8, g3)
finish_f8: FINISH_VISCHUNK(o0, f8, f10, g3)
finish_f10: FINISH_VISCHUNK(o0, f10, f12, g3)
finish_f12: FINISH_VISCHUNK(o0, f12, f14, g3)
finish_f14: UNEVEN_VISCHUNK(o0, f14, f0, g3)
finish_f16: FINISH_VISCHUNK(o0, f16, f18, g3)
finish_f18: FINISH_VISCHUNK(o0, f18, f20, g3)
finish_f20: FINISH_VISCHUNK(o0, f20, f22, g3)
finish_f22: FINISH_VISCHUNK(o0, f22, f24, g3)
finish_f24: FINISH_VISCHUNK(o0, f24, f26, g3)
finish_f26: FINISH_VISCHUNK(o0, f26, f28, g3)
finish_f28: FINISH_VISCHUNK(o0, f28, f30, g3)
finish_f30: UNEVEN_VISCHUNK(o0, f30, f0, g3)
finish_f32: FINISH_VISCHUNK(o0, f32, f34, g3)
finish_f34: FINISH_VISCHUNK(o0, f34, f36, g3)
finish_f36: FINISH_VISCHUNK(o0, f36, f38, g3)
finish_f38: FINISH_VISCHUNK(o0, f38, f40, g3)
finish_f40: FINISH_VISCHUNK(o0, f40, f42, g3)
finish_f42: FINISH_VISCHUNK(o0, f42, f44, g3)
finish_f44: FINISH_VISCHUNK(o0, f44, f46, g3)
finish_f46: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
vis_out_slk:
#ifdef __KERNEL__
srl asi_src, 3, %g5 ! IEU0 Group
xor asi_src, ASI_BLK_XOR1, asi_src ! IEU1
xor asi_src, %g5, asi_src ! IEU0 Group
#endif
vis_slk:ASI_SETSRC_NOBLK ! LSU Group
EXVIS3(LDDF [%o1] ASINORMAL, %f2) ! Load Group
add %o1, 8, %o1 ! IEU0
subcc %g3, 8, %g3 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f0, %f2, %f8 ! GRU Group
EXVIS4(STDF %f8, [%o0] ASINORMAL) ! Store
bl,pn %xcc, vis_out_slp ! CTI
add %o0, 8, %o0 ! IEU0 Group
ASI_SETSRC_NOBLK ! LSU Group
EXVIS3(LDDF [%o1] ASINORMAL, %f0) ! Load Group
add %o1, 8, %o1 ! IEU0
subcc %g3, 8, %g3 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f2, %f0, %f8 ! GRU Group
EXVIS4(STDF %f8, [%o0] ASINORMAL) ! Store
bge,pt %xcc, vis_slk ! CTI
add %o0, 8, %o0 ! IEU0 Group
vis_out_slp:
#ifdef __KERNEL__
brz,pt %o2, vis_ret ! CTI Group
mov %g1, %o1 ! IEU0
ba,pt %xcc, vis_slp+4 ! CTI Group
ASI_SETSRC_NOBLK ! LSU Group
#endif
vis_out:brz,pt %o2, vis_ret ! CTI Group
mov %g1, %o1 ! IEU0
#ifdef __KERNEL__
srl asi_src, 3, %g5 ! IEU0 Group
xor asi_src, ASI_BLK_XOR1, asi_src ! IEU1
xor asi_src, %g5, asi_src ! IEU0 Group
#endif
vis_slp:ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
subcc %o2, 1, %o2 ! IEU1
bne,pt %xcc, vis_slp ! CTI
EX(STB %g5, [%o0 - 1] ASINORMAL,
add %o2, 1) ! Store Group
vis_ret:membar #StoreLoad | #StoreStore ! LSU Group
FPU_CLEAN_RETL
__memcpy_short:
andcc %o2, 1, %g0 ! IEU1 Group
be,pt %icc, 2f ! CTI
1: ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD Group
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
subcc %o2, 1, %o2 ! IEU1 Group
be,pn %xcc, short_ret ! CTI
EX(STB %g5, [%o0 - 1] ASINORMAL,
add %o2, 1) ! Store
2: ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD Group
add %o0, 2, %o0 ! IEU0
EX2(LDUB [%o1 + 1] ASINORMAL, %o5,
sub %o0, 2, %o0,
add %o2, %g0) ! LOAD Group
add %o1, 2, %o1 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
subcc %o2, 2, %o2 ! IEU1 Group
EX(STB %g5, [%o0 - 2] ASINORMAL,
add %o2, 2) ! Store
bne,pt %xcc, 2b ! CTI
EX(STB %o5, [%o0 - 1] ASINORMAL,
add %o2, 1) ! Store
short_ret:
NORMAL_RETL
#ifndef __KERNEL__
memcpy_private:
memcpy:
#ifndef REGS_64BIT
srl %o2, 0, %o2 ! IEU1 Group
#endif
brz,pn %o2, short_ret ! CTI Group
mov %o0, %g6 ! IEU0
#endif
__memcpy_entry:
cmp %o2, 15 ! IEU1 Group
bleu,pn %xcc, __memcpy_short ! CTI
cmp %o2, (64 * 6) ! IEU1 Group
bgeu,pn %xcc, VIS_enter ! CTI
andcc %o0, 7, %g2 ! IEU1 Group
sub %o0, %o1, %g5 ! IEU0
andcc %g5, 3, %o5 ! IEU1 Group
bne,pn %xcc, memcpy_noVIS_misaligned ! CTI
andcc %o1, 3, %g0 ! IEU1 Group
#ifdef REGS_64BIT
be,a,pt %xcc, 3f ! CTI
andcc %o1, 4, %g0 ! IEU1 Group
andcc %o1, 1, %g0 ! IEU1 Group
#else /* !REGS_64BIT */
be,pt %xcc, 5f ! CTI
andcc %o1, 1, %g0 ! IEU1 Group
#endif /* !REGS_64BIT */
be,pn %xcc, 4f ! CTI
andcc %o1, 2, %g0 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUB [%o1] ASINORMAL, %g2) ! Load Group
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
sub %o2, 1, %o2 ! IEU0 Group
ASI_SETDST_NOBLK ! LSU Group
bne,pn %xcc, 5f ! CTI Group
EX(STB %g2, [%o0 - 1] ASINORMAL,
add %o2, 1) ! Store
4: ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUH [%o1] ASINORMAL, %g2) ! Load Group
add %o1, 2, %o1 ! IEU0
add %o0, 2, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
sub %o2, 2, %o2 ! IEU0
EX(STH %g2, [%o0 - 2] ASINORMAL,
add %o2, 2) ! Store Group + bubble
#ifdef REGS_64BIT
5: andcc %o1, 4, %g0 ! IEU1
3: be,a,pn %xcc, 2f ! CTI
andcc %o2, -128, %g7 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EXO2(LDUW [%o1] ASINORMAL, %g5) ! Load Group
add %o1, 4, %o1 ! IEU0
add %o0, 4, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
sub %o2, 4, %o2 ! IEU0 Group
EX(STW %g5, [%o0 - 4] ASINORMAL,
add %o2, 4) ! Store
andcc %o2, -128, %g7 ! IEU1 Group
2: be,pn %xcc, 3f ! CTI
andcc %o0, 4, %g0 ! IEU1 Group
be,pn %xcc, 82f + 4 ! CTI Group
#else /* !REGS_64BIT */
5: andcc %o2, -128, %g7 ! IEU1
be,a,pn %xcc, 41f ! CTI
andcc %o2, 0x70, %g7 ! IEU1 Group
#endif /* !REGS_64BIT */
5: MOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
MOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
MOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
MOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
EXT(5b,35f,VIScopyfixup1)
35: subcc %g7, 128, %g7 ! IEU1 Group
add %o1, 128, %o1 ! IEU0
bne,pt %xcc, 5b ! CTI
add %o0, 128, %o0 ! IEU0 Group
3: andcc %o2, 0x70, %g7 ! IEU1 Group
41: be,pn %xcc, 80f ! CTI
andcc %o2, 8, %g0 ! IEU1 Group
#ifdef __KERNEL__
79: sethi %hi(80f), %o5 ! IEU0
sll %g7, 1, %g5 ! IEU0 Group
add %o1, %g7, %o1 ! IEU1
srl %g7, 1, %g2 ! IEU0 Group
sub %o5, %g5, %o5 ! IEU1
sub %o5, %g2, %o5 ! IEU0 Group
jmpl %o5 + %lo(80f), %g0 ! CTI Group brk forced
add %o0, %g7, %o0 ! IEU0 Group
#else
! Clk1 8-(
! Clk2 8-(
! Clk3 8-(
! Clk4 8-(
79: rd %pc, %o5 ! PDU Group
sll %g7, 1, %g5 ! IEU0 Group
add %o1, %g7, %o1 ! IEU1
sub %o5, %g5, %o5 ! IEU0 Group
jmpl %o5 + %lo(80f - 79b), %g0 ! CTI Group brk forced
add %o0, %g7, %o0 ! IEU0 Group
#endif
36: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
EXT(36b,80f,VIScopyfixup2)
80: be,pt %xcc, 81f ! CTI
andcc %o2, 4, %g0 ! IEU1
#ifdef REGS_64BIT
ASI_SETSRC_NOBLK ! LSU Group
EX(LDX [%o1] ASINORMAL, %g2,
and %o2, 0xf) ! Load Group
add %o0, 8, %o0 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
EX(STW %g2, [%o0 - 0x4] ASINORMAL,
and %o2, 0xf) ! Store Group
add %o1, 8, %o1 ! IEU1
srlx %g2, 32, %g2 ! IEU0 Group
EX2(STW %g2, [%o0 - 0x8] ASINORMAL,
and %o2, 0xf, %o2,
sub %o2, 4) ! Store
#else /* !REGS_64BIT */
lduw [%o1], %g2 ! Load Group
add %o0, 8, %o0 ! IEU0
lduw [%o1 + 0x4], %g3 ! Load Group
add %o1, 8, %o1 ! IEU0
stw %g2, [%o0 - 0x8] ! Store Group
stw %g3, [%o0 - 0x4] ! Store Group
#endif /* !REGS_64BIT */
81: be,pt %xcc, 1f ! CTI
andcc %o2, 2, %g0 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUW [%o1] ASINORMAL, %g2,
and %o2, 0x7) ! Load Group
add %o1, 4, %o1 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
EX(STW %g2, [%o0] ASINORMAL,
and %o2, 0x7) ! Store Group
add %o0, 4, %o0 ! IEU0
1: be,pt %xcc, 1f ! CTI
andcc %o2, 1, %g0 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUH [%o1] ASINORMAL, %g2,
and %o2, 0x3) ! Load Group
add %o1, 2, %o1 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
EX(STH %g2, [%o0] ASINORMAL,
and %o2, 0x3) ! Store Group
add %o0, 2, %o0 ! IEU0
1: be,pt %xcc, normal_retl ! CTI
nop ! IEU1
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUB [%o1] ASINORMAL, %g2,
add %g0, 1) ! Load Group
ASI_SETDST_NOBLK ! LSU Group
EX(STB %g2, [%o0] ASINORMAL,
add %g0, 1) ! Store Group + bubble
normal_retl:
NORMAL_RETL
#ifdef REGS_64BIT
82: MOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
EXT(82b,37f,VIScopyfixup3)
37: subcc %g7, 128, %g7 ! IEU1 Group
add %o1, 128, %o1 ! IEU0
bne,pt %xcc, 82b ! CTI
add %o0, 128, %o0 ! IEU0 Group
andcc %o2, 0x70, %g7 ! IEU1
be,pn %xcc, 84f ! CTI
andcc %o2, 8, %g0 ! IEU1 Group
#ifdef __KERNEL__
83: srl %g7, 1, %g5 ! IEU0
sethi %hi(84f), %o5 ! IEU0 Group
add %g7, %g5, %g5 ! IEU1
add %o1, %g7, %o1 ! IEU0 Group
sub %o5, %g5, %o5 ! IEU1
jmpl %o5 + %lo(84f), %g0 ! CTI Group brk forced
add %o0, %g7, %o0 ! IEU0 Group
#else
! Clk1 8-(
! Clk2 8-(
! Clk3 8-(
! Clk4 8-(
83: rd %pc, %o5 ! PDU Group
add %o1, %g7, %o1 ! IEU0 Group
sub %o5, %g7, %o5 ! IEU1
jmpl %o5 + %lo(84f - 83b), %g0 ! CTI Group brk forced
add %o0, %g7, %o0 ! IEU0 Group
#endif
38: MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
EXT(38b,84f,VIScopyfixup4)
84: be,pt %xcc, 85f ! CTI Group
andcc %o2, 4, %g0 ! IEU1
ASI_SETSRC_NOBLK ! LSU Group
EX(LDX [%o1] ASINORMAL, %g2,
and %o2, 0xf) ! Load Group
add %o0, 8, %o0 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
add %o1, 8, %o1 ! IEU0 Group
EX(STX %g2, [%o0 - 0x8] ASINORMAL,
and %o2, 0xf) ! Store
85: be,pt %xcc, 1f ! CTI
andcc %o2, 2, %g0 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUW [%o1] ASINORMAL, %g2,
and %o2, 0x7) ! Load Group
add %o0, 4, %o0 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
add %o1, 4, %o1 ! IEU0 Group
EX(STW %g2, [%o0 - 0x4] ASINORMAL,
and %o2, 0x7) ! Store
1: be,pt %xcc, 1f ! CTI
andcc %o2, 1, %g0 ! IEU1 Group
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUH [%o1] ASINORMAL, %g2,
and %o2, 0x3) ! Load Group
add %o0, 2, %o0 ! IEU0
ASI_SETDST_NOBLK ! LSU Group
add %o1, 2, %o1 ! IEU0 Group
EX(STH %g2, [%o0 - 0x2] ASINORMAL,
and %o2, 0x3) ! Store
1: be,pt %xcc, 1f ! CTI
nop ! IEU0 Group
ASI_SETSRC_NOBLK ! LSU Group
EX(LDUB [%o1] ASINORMAL, %g2,
add %g0, 1) ! Load Group
ASI_SETDST_NOBLK ! LSU Group
EX(STB %g2, [%o0] ASINORMAL,
add %g0, 1) ! Store Group + bubble
1: NORMAL_RETL
#endif /* REGS_64BIT */
memcpy_noVIS_misaligned:
brz,pt %g2, 2f ! CTI Group
mov 8, %g1 ! IEU0
sub %g1, %g2, %g2 ! IEU0 Group
sub %o2, %g2, %o2 ! IEU0 Group
1: ASI_SETSRC_NOBLK ! LSU Group
EX(LDUB [%o1] ASINORMAL, %g5,
add %o2, %g2) ! Load Group
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
subcc %g2, 1, %g2 ! IEU1 Group
bne,pt %xcc, 1b ! CTI
EX2(STB %g5, [%o0 - 1] ASINORMAL,
add %o2, %g2, %o2,
add %o2, 1) ! Store
2:
#ifdef __KERNEL__
VISEntry
#endif
andn %o2, 7, %g5 ! IEU0 Group
and %o2, 7, %o2 ! IEU1
fmovd %f0, %f2 ! FPU
ASI_SETSRC_NOBLK ! LSU Group
alignaddr %o1, %g0, %g1 ! GRU Group
EXO2(LDDF [%g1] ASINORMAL, %f4) ! Load Group
1: EX(LDDF [%g1 + 0x8] ASINORMAL, %f6,
add %o2, %g5) ! Load Group
add %g1, 0x8, %g1 ! IEU0 Group
subcc %g5, 8, %g5 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f4, %f6, %f0 ! GRU Group
EX2(STDF %f0, [%o0] ASINORMAL,
add %o2, %g5, %o2,
add %o2, 8) ! Store
add %o1, 8, %o1 ! IEU0 Group
be,pn %xcc, end_cruft ! CTI
add %o0, 8, %o0 ! IEU1
ASI_SETSRC_NOBLK ! LSU Group
EX(LDDF [%g1 + 0x8] ASINORMAL, %f4,
add %o2, %g5) ! Load Group
add %g1, 8, %g1 ! IEU0
subcc %g5, 8, %g5 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
faligndata %f6, %f4, %f0 ! GRU Group
EX2(STDF %f0, [%o0] ASINORMAL,
add %o2, %g5, %o2,
add %o2, 8) ! Store
add %o1, 8, %o1 ! IEU0
ASI_SETSRC_NOBLK ! LSU Group
bne,pn %xcc, 1b ! CTI Group
add %o0, 8, %o0 ! IEU0
end_cruft:
brz,pn %o2, fpu_retl ! CTI Group
#ifndef __KERNEL__
nop ! IEU0
#else
ASI_SETSRC_NOBLK ! LSU Group
#endif
EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD
add %o1, 1, %o1 ! IEU0
add %o0, 1, %o0 ! IEU1
ASI_SETDST_NOBLK ! LSU Group
subcc %o2, 1, %o2 ! IEU1
bne,pt %xcc, vis_slp ! CTI
EX(STB %g5, [%o0 - 1] ASINORMAL,
add %o2, 1) ! Store Group
fpu_retl:
FPU_RETL
#ifdef __KERNEL__
.section .fixup
.align 4
VIScopyfixup_reto2:
mov %o2, %o1
VIScopyfixup_ret:
/* If this is copy_from_user(), zero out the rest of the
* kernel buffer.
*/
ldub [%g6 + TI_CURRENT_DS], %o4
andcc asi_src, 0x1, %g0
be,pt %icc, 1f
VISExit
andcc asi_dest, 0x1, %g0
bne,pn %icc, 1f
nop
save %sp, -160, %sp
mov %i0, %o0
call __bzero
mov %i1, %o1
restore
1: mov %o1, %o0
retl
wr %o4, %g0, %asi
VIScopyfixup1: subcc %g2, 18, %g2
add %o0, 32, %o0
bgeu,a,pt %icc, VIScopyfixup1
sub %g7, 32, %g7
sub %o0, 32, %o0
rd %pc, %g5
add %g2, (18 + 16), %g2
ldub [%g5 + %g2], %g2
ba,a,pt %xcc, 2f
.byte 0, 0, 0, 0, 0, 0, 0, 4, 4, 8, 12, 12, 16, 20, 20, 24, 28, 28
.align 4
VIScopyfixup2: mov (7 * 16), %g7
1: subcc %g2, 10, %g2
bgeu,a,pt %icc, 1b
sub %g7, 16, %g7
sub %o0, %g7, %o0
rd %pc, %g5
add %g2, (10 + 16), %g2
ldub [%g5 + %g2], %g2
ba,a,pt %xcc, 4f
.byte 0, 0, 0, 0, 0, 4, 4, 8, 12, 12
.align 4
VIScopyfixup3: subcc %g2, 10, %g2
add %o0, 32, %o0
bgeu,a,pt %icc, VIScopyfixup3
sub %g7, 32, %g7
sub %o0, 32, %o0
rd %pc, %g5
add %g2, (10 + 16), %g2
ldub [%g5 + %g2], %g2
ba,a,pt %xcc, 2f
.byte 0, 0, 0, 0, 0, 0, 0, 8, 16, 24
.align 4
2: and %o2, 0x7f, %o2
sub %g7, %g2, %g7
ba,pt %xcc, VIScopyfixup_ret
add %g7, %o2, %o1
VIScopyfixup4: mov (7 * 16), %g7
3: subcc %g2, 6, %g2
bgeu,a,pt %icc, 3b
sub %g7, 16, %g7
sub %o0, %g7, %o0
rd %pc, %g5
add %g2, (6 + 16), %g2
ldub [%g5 + %g2], %g2
ba,a,pt %xcc, 4f
.byte 0, 0, 0, 0, 0, 8
.align 4
4: and %o2, 0xf, %o2
sub %g7, %g2, %g7
ba,pt %xcc, VIScopyfixup_ret
add %g7, %o2, %o1
VIScopyfixup_vis2:
sub %o2, 0x40, %o2
VIScopyfixup_vis0:
add %o2, 0x80, %o2
VIScopyfixup_vis1:
add %g7, %g3, %g7
ba,pt %xcc, VIScopyfixup_ret
add %o2, %g7, %o1
VIScopyfixup_vis4:
add %g3, 8, %g3
VIScopyfixup_vis3:
add %g3, 8, %g3
ba,pt %xcc, VIScopyfixup_ret
add %o2, %g3, %o1
#endif
#ifdef __KERNEL__
.text
.align 32
.globl __memmove
.type __memmove,@function
.globl memmove
.type memmove,@function
memmove:
__memmove: cmp %o0, %o1
blu,pt %xcc, memcpy_private
sub %o0, %o1, %g5
add %o1, %o2, %g3
cmp %g3, %o0
bleu,pt %xcc, memcpy_private
add %o1, %o2, %g5
add %o0, %o2, %o5
sub %g5, 1, %o1
sub %o5, 1, %o0
1: ldub [%o1], %g5
subcc %o2, 1, %o2
sub %o1, 1, %o1
stb %g5, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
retl
clr %o0
#endif
/* U3copy_in_user.S: UltraSparc-III optimized memcpy. /* copy_in_user.S: Copy from userspace to userspace.
* *
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/ */
#include <asm/visasm.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#define XCC xcc #define XCC xcc
#define EXNV(x,y,a,b) \ #define EX(x,y) \
98: x,y; \ 98: x,y; \
.section .fixup; \ .section .fixup; \
.align 4; \ .align 4; \
99: retl; \ 99: retl; \
a, b, %o0; \ mov 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV1(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 4, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define EXNV8(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
add %o0, 8, %o0; \
.section __ex_table; \ .section __ex_table; \
.align 4; \ .align 4; \
.word 98b, 99b; \ .word 98b, 99b; \
...@@ -70,71 +31,84 @@ ...@@ -70,71 +31,84 @@
* to copy register windows around during thread cloning. * to copy register windows around during thread cloning.
*/ */
.globl U3copy_in_user .globl ___copy_in_user
U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it. /* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively * Reading %asi to check for KERNEL_DS is comparatively
* cheap. * cheap.
*/ */
rd %asi, %g1 rd %asi, %g1
cmp %g1, ASI_AIUS cmp %g1, ASI_AIUS
bne,pn %icc, U3memcpy_user_stub bne,pn %icc, memcpy_user_stub
nop nop
cmp %o2, 0 cmp %o2, 0
be,pn %XCC, out be,pn %XCC, 85f
or %o0, %o1, %o3 or %o0, %o1, %o3
cmp %o2, 16 cmp %o2, 16
bleu,a,pn %XCC, small_copy bleu,a,pn %XCC, 80f
or %o3, %o2, %o3 or %o3, %o2, %o3
medium_copy: /* 16 < len <= 64 */ /* 16 < len <= 64 */
andcc %o3, 0x7, %g0 andcc %o3, 0x7, %g0
bne,pn %XCC, small_copy_unaligned bne,pn %XCC, 90f
sub %o0, %o1, %o3 sub %o0, %o1, %o3
medium_copy_aligned:
andn %o2, 0x7, %o4 andn %o2, 0x7, %o4
and %o2, 0x7, %o2 and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4 1: subcc %o4, 0x8, %o4
EXNV8(ldxa [%o1] %asi, %o5, add %o4, %o2) EX(ldxa [%o1] %asi, %o5)
EXNV8(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2) EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
bgu,pt %XCC, 1b bgu,pt %XCC, 1b
add %o1, 0x8, %o1 add %o1, 0x8, %o1
andcc %o2, 0x4, %g0 andcc %o2, 0x4, %g0
be,pt %XCC, 1f be,pt %XCC, 1f
nop nop
sub %o2, 0x4, %o2 sub %o2, 0x4, %o2
EXNV4(lduwa [%o1] %asi, %o5, add %o4, %o2) EX(lduwa [%o1] %asi, %o5)
EXNV4(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2) EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
add %o1, 0x4, %o1 add %o1, 0x4, %o1
1: cmp %o2, 0 1: cmp %o2, 0
be,pt %XCC, out be,pt %XCC, 85f
nop nop
ba,pt %xcc, small_copy_unaligned ba,pt %xcc, 90f
nop nop
small_copy: /* 0 < len <= 16 */ 80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0 andcc %o3, 0x3, %g0
bne,pn %XCC, small_copy_unaligned bne,pn %XCC, 90f
sub %o0, %o1, %o3 sub %o0, %o1, %o3
small_copy_aligned: 82:
subcc %o2, 4, %o2 subcc %o2, 4, %o2
EXNV4(lduwa [%o1] %asi, %g1, add %o2, %g0) EX(lduwa [%o1] %asi, %g1)
EXNV4(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0) EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
bgu,pt %XCC, small_copy_aligned bgu,pt %XCC, 82b
add %o1, 4, %o1 add %o1, 4, %o1
out: retl 85: retl
clr %o0 clr %o0
.align 32 .align 32
small_copy_unaligned: 90:
subcc %o2, 1, %o2 subcc %o2, 1, %o2
EXNV1(lduba [%o1] %asi, %g1, add %o2, %g0) EX(lduba [%o1] %asi, %g1)
EXNV1(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0) EX(stba %g1, [%o1 + %o3] ASI_AIUS)
bgu,pt %XCC, small_copy_unaligned bgu,pt %XCC, 90b
add %o1, 1, %o1 add %o1, 1, %o1
retl retl
clr %o0 clr %o0
/* Act like copy_{to,in}_user(), ie. return zero instead
* of original destination pointer. This is invoked when
* copy_{to,in}_user() finds that %asi is kernel space.
*/
.globl memcpy_user_stub
memcpy_user_stub:
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
call memcpy
mov %i2, %o2
ret
restore %g0, %g0, %o0
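At the C level the stub above reduces to a plain memcpy; a minimal sketch (hypothetical function name), assuming %asi was found to be the kernel ASI, i.e. the caller is running with a kernel address space set via set_fs(KERNEL_DS):

#include <string.h>

/* The "user" pointers are really kernel pointers here, so copy
 * directly and report zero bytes left uncopied, matching the
 * copy_*_user() return convention. */
static unsigned long kernel_ds_copy(void *dst, const void *src,
				    unsigned long len)
{
	memcpy(dst, src, len);
	return 0;
}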
/* memmove.S: Simple memmove implementation.
*
* Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
.text
.align 32
.globl memmove
memmove:
mov %o0, %g1
cmp %o0, %o1
blu,pt %xcc, memcpy
sub %o0, %o1, %g5
add %o1, %o2, %g3
cmp %g3, %o0
bleu,pt %xcc, memcpy
add %o1, %o2, %g5
add %o0, %o2, %o5
sub %g5, 1, %o1
sub %o5, 1, %o0
1: ldub [%o1], %g5
subcc %o2, 1, %o2
sub %o1, 1, %o1
stb %g5, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
retl
mov %g1, %o0
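The overlap test memmove performs is easier to read in C; a sketch that mirrors the assembly above (not code from this patch):

#include <stddef.h>
#include <string.h>

static void *memmove_sketch(void *dst, const void *src, size_t len)
{
	char *d = dst;
	const char *s = src;

	/* A forward copy is safe when dst precedes src, or when the
	 * regions do not overlap at all (src + len <= dst). */
	if (d < s || s + len <= d)
		return memcpy(d, s, len);

	/* Otherwise copy backwards, one byte at a time, as the
	 * assembly loop does. */
	while (len--)
		d[len] = s[len];
	return d;
}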
/* user_fixup.c: Fix up user copy faults.
*
* Copyright (C) 2004 David S. Miller <davem@redhat.com>
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
/* Calculating the exact fault address when using
* block loads and stores can be very complicated.
* Instead of trying to be clever and handling all
* of the cases, just fix things up simply here.
*/
unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
{
char *dst = to;
const char __user *src = from;
while (size) {
if (__get_user(*dst, src))
break;
dst++;
src++;
size--;
}
if (size)
memset(dst, 0, size);
return size;
}
unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
{
char __user *dst = to;
const char *src = from;
while (size) {
if (__put_user(*src, dst))
break;
dst++;
src++;
size--;
}
return size;
}
unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
{
char __user *dst = to;
char __user *src = from;
while (size) {
char tmp;
if (__get_user(tmp, src))
break;
if (__put_user(tmp, dst))
break;
dst++;
src++;
size--;
}
return size;
}
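Since the fast paths only report that a fault happened, these byte-at-a-time fixups recompute an exact residue, and copy_{from,to,in}_user() keep their usual contract of returning the number of bytes not copied. A hypothetical caller-side sketch of that contract (not part of this patch):

#include <linux/errno.h>
#include <asm/uaccess.h>

static int fetch_from_user(void *kbuf, const void __user *ubuf,
			   unsigned long len)
{
	/* A non-zero return means some tail of the buffer was not
	 * copied; most callers simply turn that into -EFAULT. */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}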
...@@ -15,35 +15,25 @@ ...@@ -15,35 +15,25 @@
#include <asm/asi.h> #include <asm/asi.h>
extern void __memmove(void *,const void *,__kernel_size_t);
extern void *__memset(void *,int,__kernel_size_t); extern void *__memset(void *,int,__kernel_size_t);
extern void *__builtin_memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS #ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */ /* First the mem*() things. */
#define __HAVE_ARCH_BCOPY
#define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
#undef memmove
#define memmove(_to, _from, _n) \
({ \
void *_t = (_to); \
__memmove(_t, (_from), (_n)); \
_t; \
})
#define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void * memcpy(void *,const void *,__kernel_size_t);
#define __HAVE_ARCH_MEMSET #define __HAVE_ARCH_MEMSET
extern void *__builtin_memset(void *,int,__kernel_size_t);
static inline void *__constant_memset(void *s, int c, __kernel_size_t count) static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
{ {
extern __kernel_size_t __bzero(void *, __kernel_size_t); extern __kernel_size_t __bzero(void *, __kernel_size_t);
if(!c) { if (!c) {
__bzero(s, count); __bzero(s, count);
return s; return s;
} else } else
......
...@@ -252,18 +252,50 @@ __asm__ __volatile__( \ ...@@ -252,18 +252,50 @@ __asm__ __volatile__( \
extern int __get_user_bad(void); extern int __get_user_bad(void);
extern unsigned long __copy_from_user(void *to, const void __user *from, extern unsigned long ___copy_from_user(void *to, const void __user *from,
unsigned long size); unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
unsigned long size);
static inline unsigned long copy_from_user(void *to, const void __user *from,
unsigned long size)
{
unsigned long ret = ___copy_from_user(to, from, size);
if (ret)
ret = copy_from_user_fixup(to, from, size);
return ret;
}
#define __copy_from_user copy_from_user
extern unsigned long __copy_to_user(void __user *to, const void *from, extern unsigned long ___copy_to_user(void __user *to, const void *from,
unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
unsigned long size); unsigned long size);
static inline unsigned long copy_to_user(void __user *to, const void *from,
unsigned long size)
{
unsigned long ret = ___copy_to_user(to, from, size);
extern unsigned long __copy_in_user(void __user *to, const void __user *from, if (ret)
ret = copy_to_user_fixup(to, from, size);
return ret;
}
#define __copy_to_user copy_to_user
extern unsigned long ___copy_in_user(void __user *to, const void __user *from,
unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
unsigned long size); unsigned long size);
static inline unsigned long copy_in_user(void __user *to, void __user *from,
unsigned long size)
{
unsigned long ret = ___copy_in_user(to, from, size);
#define copy_from_user __copy_from_user if (ret)
#define copy_to_user __copy_to_user ret = copy_in_user_fixup(to, from, size);
#define copy_in_user __copy_in_user return ret;
}
#define __copy_in_user copy_in_user
extern unsigned long __bzero_noasi(void __user *, unsigned long); extern unsigned long __bzero_noasi(void __user *, unsigned long);
......