Commit ca5d3f14 authored by Ingo Molnar

x86: clean up mmx_32.c

checkpatch.pl --file cleanups:

  before:
    total: 74 errors, 3 warnings, 386 lines checked

  after:
    total: 0 errors, 0 warnings, 377 lines checked
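
These totals come from the kernel's own style checker run over the whole file; an invocation along the following lines reproduces such a report (the exact command line isn't recorded in the commit, but --file is the flag quoted above, and the script ships in the kernel tree under scripts/):

    ./scripts/checkpatch.pl --file arch/x86/lib/mmx_32.c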

no code changed:

arch/x86/lib/mmx_32.o:
   text    data     bss     dec     hex filename
   1323       0       8    1331     533 mmx_32.o.before
   1323       0       8    1331     533 mmx_32.o.after

md5:
   4cc39f1017dc40a5ebf02ce0ff7312bc  mmx_32.o.before.asm
   4cc39f1017dc40a5ebf02ce0ff7312bc  mmx_32.o.after.asm
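
The identical text/data/bss figures and matching md5 sums of the disassembly are what back the "no code changed" claim. A comparison of this kind can be reproduced roughly as follows; these commands are illustrative, not taken from the commit itself:

    size mmx_32.o.before mmx_32.o.after
    objdump -d mmx_32.o.before > mmx_32.o.before.asm
    objdump -d mmx_32.o.after  > mmx_32.o.after.asm
    md5sum mmx_32.o.before.asm mmx_32.o.after.asm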
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 04aaa7ba
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/module.h>
-#include <asm/asm.h>
-#include <asm/i387.h>
 /*
  * MMX 3DNow! library helper functions
  *
  * To do:
  * We can use MMX just for prefetch in IRQ's. This may be a win.
  *	(reported so on K6-III)
  * We should use a better code neutral filler for the short jump
  *	leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
  * We also want to clobber the filler register so we don't get any
  *	register forwarding stalls on the filler.
  *
  * Add *user handling. Checksums are not a win with MMX on any CPU
  * tested so far for any MMX solution figured.
  *
  * 22/09/2000 - Arjan van de Ven
  *	Improved for non-egineering-sample Athlons
  *
  */
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/i387.h>
+#include <asm/asm.h>
 
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 	void *p;
@@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b)
+		: : "r" (from));
 
-	for(; i>5; i--)
-	{
+	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
+		_ASM_EXTABLE(1b, 3b)
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 
-	for(; i>0; i--)
-	{
+	for ( ; i > 0; i--) {
 		__asm__ __volatile__ (
 		" movq (%0), %%mm0\n"
 		" movq 8(%0), %%mm1\n"
@@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		" movq %%mm1, 40(%1)\n"
 		" movq %%mm2, 48(%1)\n"
 		" movq %%mm3, 56(%1)\n"
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 	/*
-	 * Now do the tail of the block
+	 * Now do the tail of the block:
 	 */
-	__memcpy(to, from, len&63);
+	__memcpy(to, from, len & 63);
 	kernel_fpu_end();
 	return p;
 }
+EXPORT_SYMBOL(_mmx_memcpy);
 
 #ifdef CONFIG_MK7
@@ -128,13 +127,12 @@ static void fast_clear_page(void *page)
 	int i;
 	kernel_fpu_begin();
 	__asm__ __volatile__ (
 		" pxor %%mm0, %%mm0\n" : :
 	);
-	for(i=0;i<4096/64;i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		" movntq %%mm0, (%0)\n"
 		" movntq %%mm0, 8(%0)\n"
@@ -145,14 +143,15 @@ static void fast_clear_page(void *page)
 		" movntq %%mm0, 48(%0)\n"
 		" movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=64;
+		page += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		" sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
+
 	kernel_fpu_end();
 }
@@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from)
 	kernel_fpu_begin();
-	/* maybe the prefetch stuff can go before the expensive fnsave...
+	/*
+	 * maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
 	 */
-	__asm__ __volatile__ (
+	__asm__ __volatile__(
 		"1: prefetch (%0)\n"
 		" prefetch 64(%0)\n"
 		" prefetch 128(%0)\n"
@@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<(4096-320)/64; i++)
-	{
+	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
-	for(i=(4096-320)/64; i<4096/64; i++)
-	{
+
+	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"2: movq (%0), %%mm0\n"
 		" movntq %%mm0, (%1)\n"
@@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from)
 		" movntq %%mm6, 48(%1)\n"
 		" movq 56(%0), %%mm7\n"
 		" movntq %%mm7, 56(%1)\n"
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+		from += 64;
+		to += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-		" sfence \n" : :
-	);
+	__asm__ __volatile__("sfence \n"::);
+
 	kernel_fpu_end();
 }
 
-#else
+#else /* CONFIG_MK7 */
 
 /*
  * Generic MMX implementation without K7 specific streaming
  */
 static void fast_clear_page(void *page)
 {
 	int i;
 	kernel_fpu_begin();
 	__asm__ __volatile__ (
 		" pxor %%mm0, %%mm0\n" : :
 	);
-	for(i=0;i<4096/128;i++)
-	{
+	for (i = 0; i < 4096/128; i++) {
 		__asm__ __volatile__ (
 		" movq %%mm0, (%0)\n"
 		" movq %%mm0, 8(%0)\n"
@@ -275,8 +270,8 @@ static void fast_clear_page(void *page)
 		" movq %%mm0, 104(%0)\n"
 		" movq %%mm0, 112(%0)\n"
 		" movq %%mm0, 120(%0)\n"
 		: : "r" (page) : "memory");
-		page+=128;
+		page += 128;
 	}
 	kernel_fpu_end();
@@ -285,8 +280,7 @@ static void fast_clear_page(void *page)
 
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
-
 	kernel_fpu_begin();
 	__asm__ __volatile__ (
@@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<4096/64; i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
 		" jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
+		_ASM_EXTABLE(1b, 3b)
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 	kernel_fpu_end();
 }
 
-#endif
+#endif /* !CONFIG_MK7 */
 
 /*
- * Favour MMX for page clear and copy.
+ * Favour MMX for page clear and copy:
  */
-
-static void slow_zero_page(void * page)
+static void slow_zero_page(void *page)
 {
 	int d0, d1;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; stosl" \
-		: "=&c" (d0), "=&D" (d1)
-		:"a" (0),"1" (page),"0" (1024)
-		:"memory");
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; stosl"
+
+			: "=&c" (d0), "=&D" (d1)
+			:"a" (0), "1" (page), "0" (1024)
+			:"memory");
 }
 
-void mmx_clear_page(void * page)
+void mmx_clear_page(void *page)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
 }
+EXPORT_SYMBOL(mmx_clear_page);
 
 static void slow_copy_page(void *to, void *from)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; movsl" \
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
-		: "0" (1024),"1" ((long) to),"2" ((long) from) \
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; movsl"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (1024), "1" ((long) to), "2" ((long) from)
 		: "memory");
 }
 
 void mmx_copy_page(void *to, void *from)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
 }
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
 EXPORT_SYMBOL(mmx_copy_page);