Commit 1a4fded6
authored Mar 20, 2017 by Al Viro
mips: get rid of tail-zeroing in primitives
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
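For context: before this patch, a copy that faulted partway through would zero the uncopied tail of the destination buffer inside these MIPS primitives, and the inatomic variants existed only to skip that zeroing (the t7/t6 flag in the assembly below). With the zeroing removed, every variant simply returns the number of uncopied bytes and leaves any padding policy to the caller. A minimal C sketch of the resulting contract (raw_copy and copy_with_zeroed_tail are hypothetical names for illustration, not symbols from this diff):

#include <stddef.h>
#include <string.h>

/*
 * Sketch of the post-patch contract: the primitive returns how many
 * bytes it could NOT copy (0 on full success) and never touches the
 * uncopied tail.  A caller that still wants a zero-padded buffer
 * pads it itself.  raw_copy is a hypothetical stand-in for a
 * primitive like __copy_user.
 */
static size_t copy_with_zeroed_tail(void *dst, const void *src, size_t len,
				    size_t (*raw_copy)(void *, const void *,
						       size_t))
{
	size_t left = raw_copy(dst, src, len);	/* bytes left uncopied */

	if (left)
		memset((char *)dst + (len - left), 0, left);
	return left;
}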
parent ab0aca27
Showing 3 changed files with 3 additions and 96 deletions
arch/mips/cavium-octeon/octeon-memcpy.S  +1 -30
arch/mips/include/asm/uaccess.h          +2 -17
arch/mips/lib/memcpy.S                   +0 -49
arch/mips/cavium-octeon/octeon-memcpy.S

@@ -139,15 +139,6 @@
 	.set	noreorder
 	.set	noat
-/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t7, 1
-	END(__copy_user_inatomic)
-
 /*
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
 __memcpy:
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t7, 0 /* not inatomic */
-__copy_user_common:
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
 	 * Temps
@@ -414,25 +403,7 @@ l_exc:
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t7, 2f		/* Skip the zeroing out part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *    See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	beqz	len, done
-	 SUB	src, len, 1
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	bnez	src, 1b
-	 SUB	src, src, 1
-2:	jr	ra
+	jr	ra
 	 nop
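The fixup deleted above is the core of the change. Its pointer arithmetic, restated as a C sketch (illustrative only; fault_addr stands for the bad-access address loaded from THREAD_BUADDR into t0):

#include <stddef.h>

/*
 * What the removed exception-path code did: src and dst advance in
 * tandem during the copy, so the first destination byte never written
 * is at dst + (fault_addr - src).  The old code then stored len zero
 * bytes from there, one sb per iteration (it could not call __bzero,
 * which might clobber len).
 */
static void zero_uncopied_tail(unsigned char *dst, const unsigned char *src,
			       const unsigned char *fault_addr, size_t len)
{
	unsigned char *p = dst + (fault_addr - src);

	while (len--)
		*p++ = 0;
}

With the loop gone, the handler just reports len (the uncopied byte count) and returns. This is also why the li t7, 0 /* not inatomic */ initialisation earlier in the file can go: the removed bnez t7, 2f was its only consumer.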
arch/mips/include/asm/uaccess.h

@@ -841,9 +841,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_kernel(to, from, n)		\
 	__invoke_copy_from(__copy_user, to, from, n)
 
-#define __invoke_copy_from_kernel_inatomic(to, from, n)	\
-	__invoke_copy_from(__copy_user_inatomic, to, from, n)
-
 #define __invoke_copy_to_kernel(to, from, n)		\
 	__invoke_copy_to(__copy_user, to, from, n)
@@ -854,9 +851,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_user(to, from, n)		\
 	__invoke_copy_from(__copy_user, to, from, n)
 
-#define __invoke_copy_from_user_inatomic(to, from, n)	\
-	__invoke_copy_from(__copy_user_inatomic, to, from, n)
-
 #define __invoke_copy_to_user(to, from, n)		\
 	__invoke_copy_to(__copy_user, to, from, n)
@@ -867,8 +861,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 /* EVA specific functions */
 
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
 				   size_t __n);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
@@ -882,9 +874,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 #define __invoke_copy_from_user(to, from, n)		\
 	__invoke_copy_from(__copy_from_user_eva, to, from, n)
 
-#define __invoke_copy_from_user_inatomic(to, from, n)	\
-	__invoke_copy_from(__copy_user_inatomic_eva, to, from, n)
-
 #define __invoke_copy_to_user(to, from, n)		\
 	__invoke_copy_to(__copy_to_user_eva, to, from, n)
@@ -930,8 +919,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
 #define __copy_to_user_inatomic(to, from, n)				\
 ({									\
 	void __user *__cu_to;						\
@@ -966,12 +953,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	check_object_size(__cu_to, __cu_len, false);			\
 									\
 	if (eva_kernel_access())					\
-		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
-							      __cu_from,\
+		__cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\
 							      __cu_len);\
 	else								\
-		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
-							    __cu_from,	\
+		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
 							    __cu_len);	\
 	__cu_len;							\
 })
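Net effect on this header: the _inatomic invocation macros and the __copy_user_inatomic / __copy_user_inatomic_eva declarations disappear, and __copy_from_user_inatomic() now routes to the ordinary helpers. A simplified expansion (non-EVA kernel access shown; not literal kernel code):

/*
 *   __copy_from_user_inatomic(to, from, n)
 *     -> __invoke_copy_from_kernel(to, from, n)   (was ..._kernel_inatomic)
 *     -> __invoke_copy_from(__copy_user, to, from, n)
 *
 * With tail-zeroing removed from __copy_user itself, the separate
 * inatomic entry points no longer did anything differently.
 */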
arch/mips/lib/memcpy.S

@@ -562,39 +562,9 @@
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *    See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone\@
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	.set	push
-	.set	noat
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-	.set	pop
-#endif
 	jr	ra
 	 nop
 
 #define SEXC(n)							\
 	.set	reorder;			/* DADDI_WAR */	\
 .Ls_exc_p ## n ## u\@:						\
@@ -672,15 +642,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
 	move	a2, zero
 	END(__rmemcpy)
 
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic)
-
 /*
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
 .L__memcpy:
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t6, 0 /* not inatomic */
-__copy_user_common:
 	/* Legacy Mode, user <-> user */
 	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -708,20 +667,12 @@ __copy_user_common:
  * space
  */
 
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
-	b	__copy_from_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic_eva)
-
 /*
  * __copy_from_user (EVA)
 */
 
 LEAF(__copy_from_user_eva)
 EXPORT_SYMBOL(__copy_from_user_eva)
-	li	t6, 0 /* not inatomic */
 __copy_from_user_common:
 	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
 	END(__copy_from_user_eva)