Commit 23504bae, authored Mar 21, 2017 by Al Viro
tile: get rid of zeroing, switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

parent c0ea73f1
Showing 5 changed files with 27 additions and 180 deletions (+27 -180):

  arch/tile/Kconfig                 +1   -0
  arch/tile/include/asm/uaccess.h   +7   -136
  arch/tile/lib/exports.c           +3   -4
  arch/tile/lib/memcpy_32.S         +13  -28
  arch/tile/lib/memcpy_user_64.c    +3   -12
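Why the arch-side zeroing can go away: once an architecture selects ARCH_HAS_RAW_COPY_USER, it only supplies raw_copy_{to,from}_user(), which return the number of bytes left uncopied and do no zero-padding of their own. The access_ok() checks and the zero-on-short-copy semantics of copy_from_user() are then handled once, in generic code. For orientation, a simplified sketch of that generic wrapper, paraphrased from the 4.12-era include/linux/uaccess.h (not part of this diff; the real version also carries hardening hooks):

static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;				/* bytes NOT copied; 0 on success */
}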
arch/tile/Kconfig
@@ -33,6 +33,7 @@ config TILE
 	select USER_STACKTRACE_SUPPORT
 	select USE_PMC if PERF_EVENTS
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER

 config MMU
 	def_bool y
arch/tile/include/asm/uaccess.h
@@ -313,145 +313,16 @@ extern int __put_user_bad(void)
 		((x) = 0, -EFAULT);					\
 	})

-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
-	void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().  This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
-	void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
-	void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	if (likely(sz == -1 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER

 #ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to user space.  Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
 	void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
 #endif
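Note the two added defines: INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER ask the generic uaccess header to emit the _copy_from_user()/_copy_to_user() helpers as inlines rather than as out-of-line code in lib/usercopy.c. The contract described by the deleted kernel-doc survives the rename: the raw routines return the number of bytes that could not be copied, zero on complete success, so callers keep the usual idiom. A hypothetical caller for illustration (struct args and get_args() are made up, not from this commit):

/* Illustrative only; struct args and get_args() are hypothetical. */
static int get_args(struct args *kbuf, const void __user *ubuf)
{
	if (copy_from_user(kbuf, ubuf, sizeof(*kbuf)))
		return -EFAULT;	/* nonzero return = bytes left uncopied */
	return 0;
}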
arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);

 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif

 /* hypervisor glue */
arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@

 #define IS_MEMCPY	  0
 #define IS_COPY_FROM_USER  1
-#define IS_COPY_FROM_USER_ZEROING  2
 #define IS_COPY_TO_USER   -1

 	.section .text.memcpy_common, "ax"

@@ -42,40 +41,31 @@
 	9

-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
-	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+	FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
	  .text.memcpy_common, \
-	  .Lend_memcpy_common - __copy_from_user_inatomic)
+	  .Lend_memcpy_common - raw_copy_from_user)
 	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
-	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+	.size raw_copy_from_user, . - raw_copy_from_user

-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
-	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
-	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+	.size raw_copy_to_user, . - raw_copy_to_user

 ENTRY(memcpy)
 .type memcpy, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_MEMCPY }
 	.size memcpy, . - memcpy
 	/* Fall through */

@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }

 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f }  /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }

 copy_to_user_fixup_loop:
arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
 		__v;						\
 	})

-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))

@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"

-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST

@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"

-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))

@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}