Commit 3a0e75ad authored Mar 22, 2017 by Al Viro
xtensa: get rid of zeroing, use RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 0b46a94e
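For context: with ARCH_HAS_RAW_COPY_USER selected, the architecture only has to supply raw_copy_from_user()/raw_copy_to_user(), which copy as much as they can and return the number of bytes left uncopied. The access_ok() check and the zeroing of the uncopied tail on a faulting copy_from_user() are handled by the generic code, which is why the arch-level zeroing (the memset path in __xtensa_copy_user's fixup code) can go away. A rough sketch of the generic wrapper's shape, paraphrased from memory of the 4.11-era include/linux/uaccess.h; the function name is illustrative and this code is not part of the patch below:

	/* Sketch only: generic copy_from_user() built on raw_copy_from_user(),
	 * which returns the number of bytes NOT copied. */
	static inline unsigned long
	sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (access_ok(VERIFY_READ, from, n))		/* access_ok() signature of this era */
			res = raw_copy_from_user(to, from, n);	/* bytes not copied */
		if (res)					/* zero only the faulted tail */
			memset(to + (n - res), 0, res);
		return res;
	}

Note that only the uncopied tail is zeroed here, whereas the old arch code below zeroed the whole buffer when access_ok() failed.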
Showing 3 changed files with 57 additions and 114 deletions (+57 -114):

  arch/xtensa/Kconfig                +1   -0
  arch/xtensa/include/asm/uaccess.h  +8   -46
  arch/xtensa/lib/usercopy.S         +48  -68
arch/xtensa/Kconfig
...
...
@@ -29,6 +29,7 @@ config XTENSA
 	select NO_BOOTMEM
 	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
 	  primarily for embedded systems.  These processors are both
...
...
arch/xtensa/include/asm/uaccess.h
...
...
@@ -234,60 +234,22 @@ __asm__ __volatile__(	\
  * Copy to/from user space
  */
 
-/*
- * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions.  __xtensa_copy_user is quite efficient.  See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
+	prefetchw(to);
+	return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	prefetchw(from);
+	return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 /*
  * We need to return the number of bytes not cleared.  Our memset()
...
...
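The caller-visible contract does not change with this conversion: copy_from_user()/copy_to_user() still return the number of bytes that could not be copied, with 0 meaning complete success. A hypothetical caller, shown only to illustrate what the "bytes not copied" return value means (not part of this commit):

	/* Illustrative caller: the usual -EFAULT pattern on a partial copy. */
	static int example_fetch_from_user(void *kbuf, const void __user *ubuf,
					   size_t len)
	{
		if (copy_from_user(kbuf, ubuf, len))	/* non-zero => some bytes faulted */
			return -EFAULT;
		return 0;				/* kbuf now holds len bytes from user space */
	}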
arch/xtensa/lib/usercopy.S
...
...
@@ -102,9 +102,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte
 
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
...
...
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, s_fixup)
-	EX(s8i, a7, a5, 1, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
+	EX(s8i, a7, a5, 1, fixup)
 	addi	a5, a5, 2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
...
...
@@ -135,9 +135,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
...
...
@@ -161,15 +161,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3, 0, l_fixup)
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(l32i, a6, a3, 8, l_fixup)
-	EX(s32i, a7, a5, 4, s_fixup)
-	EX(l32i, a7, a3, 12, l_fixup)
-	EX(s32i, a6, a5, 8, s_fixup)
+	EX(l32i, a6, a3, 0, fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(l32i, a6, a3, 8, fixup)
+	EX(s32i, a7, a5, 4, fixup)
+	EX(l32i, a7, a3, 12, fixup)
+	EX(s32i, a6, a5, 8, fixup)
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, s_fixup)
+	EX(s32i, a7, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
...
...
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3, 0, l_fixup)
-	EX(l32i, a7, a3, 4, l_fixup)
+	EX(l32i, a6, a3, 0, fixup)
+	EX(l32i, a7, a3, 4, fixup)
 	addi	a3, a3, 8
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(s32i, a7, a5, 4, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(s32i, a7, a5, 4, fixup)
 	addi	a5, a5, 8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3, 0, l_fixup)
+	EX(l32i, a6, a3, 0, fixup)
 	addi	a3, a3, 4
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a5, a5, 4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3, 0, l_fixup)
+	EX(l16ui, a6, a3, 0, fixup)
 	addi	a3, a3, 2
-	EX(s16i, a6, a5, 0, s_fixup)
+	EX(s16i, a6, a5, 0, fixup)
 	addi	a5, a5, 2
 .L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(s8i, a6, a5, 0, fixup)
 .L5:
 	movi	a2, 0	# return success for len bytes copied
 	retw
...
...
@@ -217,7 +217,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, l_fixup)	# load first word
+	EX(l32i, a6, a3, 0, fixup)	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
...
...
@@ -226,19 +226,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(l32i, a8, a3, 8, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(l32i, a8, a3, 8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
-	EX(l32i, a9, a3, 12, l_fixup)
+	EX(s32i, a6, a5, 0, fixup)
+	EX(l32i, a9, a3, 12, fixup)
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, s_fixup)
-	EX(l32i, a6, a3, 16, l_fixup)
+	EX(s32i, a7, a5, 4, fixup)
+	EX(l32i, a6, a3, 16, fixup)
 	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5, 8, s_fixup)
+	EX(s32i, a8, a5, 8, fixup)
 	addi	a3, a3, 16
 	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, s_fixup)
+	EX(s32i, a9, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
...
...
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3, 4, l_fixup)
-	EX(l32i, a8, a3, 8, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
+	EX(l32i, a8, a3, 8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a3, a3, 8
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5, 4, s_fixup)
+	EX(s32i, a7, a5, 4, fixup)
 	addi	a5, a5, 8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3, 4, l_fixup)
+	EX(l32i, a7, a3, 4, fixup)
 	addi	a3, a3, 4
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5, 0, s_fixup)
+	EX(s32i, a6, a5, 0, fixup)
 	addi	a5, a5, 4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3, 2
-	EX(s8i, a6, a5, 0, s_fixup)
-	EX(s8i, a7, a5, 1, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
+	EX(s8i, a7, a5, 1, fixup)
 	addi	a5, a5, 2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(s8i, a6, a5, 0, fixup)
 .L15:
 	movi	a2, 0	# return success for len bytes copied
 	retw
...
...
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  * bytes_copied = a5 - a2
  * retval = bytes_not_copied = original len - bytes_copied
  * retval = a11 - (a5 - a2)
  *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes.  This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */
 
-.Lmemset:	.word	memset
-
-s_fixup:
+fixup:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
-
-l_fixup:
-	sub	a2, a5, a2	/* a2 <-- bytes copied */
-	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */
-
-	/* void *memset(void *s, int c, size_t n); */
-	mov	a6, a5		/* s */
-	movi	a7, 0		/* c */
-	mov	a8, a2		/* n */
-	l32r	a4, .Lmemset
-	callx4	a4
-	/* Ignore memset return value in a6. */
-	/* a2 still contains bytes not copied. */
-	retw
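The surviving fixup path only has to compute the return value. Expressed in C, the register arithmetic described in the comment above (per that comment, a2 holds the original destination, a5 the destination pointer at the time of the fault, a11 the original length) amounts to the following sketch; the function and parameter names are illustrative, not kernel code:

	/* C rendering of "retval = a11 - (a5 - a2)" from the fixup comment. */
	static unsigned long fixup_retval(unsigned long orig_dst,	/* a2 */
					  unsigned long fault_dst,	/* a5 */
					  unsigned long orig_len)	/* a11 */
	{
		unsigned long bytes_copied = fault_dst - orig_dst;	/* a5 - a2 */

		return orig_len - bytes_copied;	/* bytes not copied, returned in a2 */
	}

With the *_zeroing behaviour gone, load faults and store faults can share this single fixup label, which is why the separate l_fixup/s_fixup targets collapse into one throughout the file.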