Commit 4f0d14b0 authored by Linus Torvalds

Merge tag 'metag-for-v4.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag

Pull metag usercopy fixes from James Hogan:
 "Metag usercopy fault handling fixes

  These patches fix a bunch of longstanding (some over a decade old)
  metag user copy fault handling bugs. Thanks go to Al Viro for spotting
  some of the questionable code in the first place"

* tag 'metag-for-v4.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag:
  metag/usercopy: Add missing fixups
  metag/usercopy: Fix src fixup in from user rapf loops
  metag/usercopy: Set flags before ADDZ
  metag/usercopy: Zero rest of buffer from copy_from_user
  metag/usercopy: Add early abort to copy_to_user
  metag/usercopy: Fix alignment error checking
  metag/usercopy: Drop unused macros
parents 7ab66185 b884a190
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-						      const void __user *from,
-						      unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
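The uaccess.h hunk above moves the zeroing of uncopied bytes out of the fault path: copy_from_user() now calls raw_copy_from_user() and, on a partial copy, zeroes only the tail that was not transferred, returning the number of bytes left. A minimal sketch of what a caller can now rely on; demo_read() is a hypothetical example, not part of the patch:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int demo_read(const void __user *ubuf, size_t len)
	{
		char kbuf[64];
		unsigned long left;

		if (len > sizeof(kbuf))
			len = sizeof(kbuf);

		left = copy_from_user(kbuf, ubuf, len);
		if (left) {
			/* Partial copy: the first (len - left) bytes hold
			 * user data, and the final 'left' bytes are now
			 * guaranteed to be zeroed, so stale kernel stack
			 * contents cannot leak through kbuf. */
			return -EFAULT;
		}
		return 0;	/* all 'len' bytes were copied */
	}

The remaining hunks below are from the metag usercopy implementation itself.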
@@ -29,7 +29,6 @@
 		COPY						 \
 		"1:\n"						 \
 		"	.section .fixup,\"ax\"\n"		 \
-		"	MOV D1Ar1,#0\n"				 \
 		FIXUP						 \
 		"	MOVT	D1Ar1,#HI(1b)\n"		 \
 		"	JUMP	D1Ar1,#LO(1b)\n"		 \
@@ -260,27 +259,31 @@
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #32\n"				\
 		"23:\n"						\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"SUB	%3, %3, #32\n"				\
 		"24:\n"						\
+		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"25:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:\n"						\
 		"SUB	%3, %3, #32\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 							\
 		"MOV	RAPF, %1\n"				\
-		"25:\n"						\
+		"27:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"26:\n"						\
+		"28:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:\n"						\
 		"SUB	%3, %3, #32\n"				\
-		"27:\n"						\
+		"30:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"28:\n"						\
+		"31:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:\n"						\
 		"SUB	%0, %0, #8\n"				\
-		"29:\n"						\
+		"33:\n"						\
 		"SETL	[%0++], D0.7, D1.7\n"			\
 		"SUB	%3, %3, #32\n"				\
 		"1:"						\
@@ -312,11 +315,15 @@
 		"	.long 26b,3b\n"			\
 		"	.long 27b,3b\n"			\
 		"	.long 28b,3b\n"			\
-		"	.long 29b,4b\n"			\
+		"	.long 29b,3b\n"			\
+		"	.long 30b,3b\n"			\
+		"	.long 31b,3b\n"			\
+		"	.long 32b,3b\n"			\
+		"	.long 33b,4b\n"			\
 		"	.previous\n"				\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
  *
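The new ".long 29b,3b" through ".long 33b,4b" rows matter because every potentially faulting MGETL/MSETL label in the loop needs an entry in this table; labels 29-33 previously had none, which is the bug the "Add missing fixups" patch in the shortlog addresses. Conceptually each ".long insn,fixup" pair behaves like the usual kernel exception-table entry. A sketch of the lookup a fault handler performs, illustrative only (the metag fault path differs in detail, and lookup_fixup() is a made-up name):

	struct exception_table_entry {
		unsigned long insn;	/* address that may fault */
		unsigned long fixup;	/* where to resume execution */
	};

	static unsigned long lookup_fixup(const struct exception_table_entry *tbl,
					  unsigned int nr, unsigned long fault_pc)
	{
		unsigned int i;

		for (i = 0; i < nr; i++)
			if (tbl[i].insn == fault_pc)
				return tbl[i].fixup;	/* run the fixup code */
		return 0;	/* no entry: the fault is fatal */
	}

The added "cc" clobber is needed because the fixup code now uses ANDS, which writes the condition flags.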
@@ -342,7 +349,7 @@
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -403,47 +410,55 @@
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #16\n"				\
 		"23:\n"						\
-		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"24:\n"						\
-		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
 		"SUB	%3, %3, #16\n"				\
-		"25:\n"						\
+		"24:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"26:\n"						\
+		"25:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:\n"						\
 		"SUB	%3, %3, #16\n"				\
 		"27:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"28:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:\n"						\
 		"SUB	%3, %3, #16\n"				\
+		"30:\n"						\
+		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"31:\n"						\
+		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:\n"						\
+		"SUB	%3, %3, #16\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 							\
 		"MOV	RAPF, %1\n"				\
-		"29:\n"						\
+		"33:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"30:\n"						\
+		"34:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"35:\n"						\
 		"SUB	%3, %3, #16\n"				\
-		"31:\n"						\
+		"36:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"32:\n"						\
+		"37:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"38:\n"						\
 		"SUB	%3, %3, #16\n"				\
-		"33:\n"						\
+		"39:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"34:\n"						\
+		"40:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"41:\n"						\
 		"SUB	%3, %3, #16\n"				\
-		"35:\n"						\
+		"42:\n"						\
 		"MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"36:\n"						\
+		"43:\n"						\
 		"MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"44:\n"						\
 		"SUB	%0, %0, #4\n"				\
-		"37:\n"						\
+		"45:\n"						\
 		"SETD	[%0++], D0.7\n"				\
 		"SUB	%3, %3, #16\n"				\
 		"1:"						\
@@ -483,11 +498,19 @@
 		"	.long 34b,3b\n"			\
 		"	.long 35b,3b\n"			\
 		"	.long 36b,3b\n"			\
-		"	.long 37b,4b\n"			\
+		"	.long 37b,3b\n"			\
+		"	.long 38b,3b\n"			\
+		"	.long 39b,3b\n"			\
+		"	.long 40b,3b\n"			\
+		"	.long 41b,3b\n"			\
+		"	.long 42b,3b\n"			\
+		"	.long 43b,3b\n"			\
+		"	.long 44b,3b\n"			\
+		"	.long 45b,4b\n"			\
 		"	.previous\n"				\
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)	\
 		: "0" (to), "1" (from), "2" (ret), "3" (n)	\
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
  *
@@ -513,7 +536,7 @@
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_to_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
 		while (n > 0) {
 			__asm_copy_to_user_1(dst, src, retn);
 			n--;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_to_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
 		while (n >= 2) {
 			__asm_copy_to_user_2(dst, src, retn);
 			n -= 2;
+			if (retn)
+				return retn + n;
 		}
 	}
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 #endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	while (n >= 16) {
 		__asm_copy_to_user_16(dst, src, retn);
 		n -= 16;
+		if (retn)
+			return retn + n;
 	}
 
 	while (n >= 4) {
 		__asm_copy_to_user_4(dst, src, retn);
 		n -= 4;
+		if (retn)
+			return retn + n;
 	}
 
 	switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		break;
 	}
 
+	/*
+	 * If we get here, retn correctly reflects the number of failing
+	 * bytes.
+	 */
 	return retn;
 }
 EXPORT_SYMBOL(__copy_user);
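Each __asm_copy_to_user_N() step leaves in retn the number of bytes of that step that faulted (its fixup adds the step size), and n is decremented for the whole step before the check, so retn + n is exactly the count of bytes not copied. A model of that accounting in plain C; copy_chunk() is a hypothetical stand-in for one asm step:

	/* Returns 0 on success, or the number of bytes of this chunk that
	 * could not be stored (what the asm fixup leaves in retn). */
	extern unsigned long copy_chunk(unsigned long sz);	/* hypothetical */

	static unsigned long copy_to_user_model(unsigned long total)
	{
		unsigned long n = total;	/* bytes still to attempt */
		unsigned long retn = 0;

		while (n >= 4) {
			retn = copy_chunk(4);
			n -= 4;			/* whole chunk accounted first */
			if (retn)
				return retn + n; /* failed part + untouched rest */
		}
		return retn;			/* 0: everything copied */
	}

Aborting on the first fault also means later chunks are never attempted against an address range already known to fault.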
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"2:	SETB [%0++],D1Ar1\n",	\
-		"3:	ADD  %2,%2,#1\n"	\
-		"	SETB [%0++],D1Ar1\n",	\
+		"3:	ADD  %2,%2,#1\n",	\
 		"	.long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#2\n" FIXUP,		\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"		\
 		"4:	SETB [%0++],D1Ar1\n",		\
-		"5:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
+		"5:	ADD  %2,%2,#1\n",		\
 		"	.long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"2:	SETD [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#4\n" FIXUP,		\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
 	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
-#define __asm_copy_from_user_5(to, from, ret) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"4:	SETB [%0++],D1Ar1\n",		\
-		"5:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"4:	SETW [%0++],D1Ar1\n" COPY,	\
-		"5:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
-	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
-	__asm_copy_from_user_6x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"6:	SETB [%0++],D1Ar1\n",		\
-		"7:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_4x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"4:	SETD [%0++],D1Ar1\n" COPY,	\
-		"5:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
-	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"6:	SETB [%0++],D1Ar1\n",		\
-		"7:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"6:	SETW [%0++],D1Ar1\n" COPY,	\
-		"7:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
-	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
-	__asm_copy_from_user_10x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"8:	SETB [%0++],D1Ar1\n",		\
-		"9:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_8x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"6:	SETD [%0++],D1Ar1\n" COPY,	\
-		"7:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
-	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"8:	SETB [%0++],D1Ar1\n",		\
-		"9:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETW D1Ar1,[%1++]\n"		\
-		"8:	SETW [%0++],D1Ar1\n" COPY,	\
-		"9:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
-	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
-	__asm_copy_from_user_14x_cont(to, from, ret,	\
-		"	GETB D1Ar1,[%1++]\n"		\
-		"10:	SETB [%0++],D1Ar1\n",		\
-		"11:	ADD  %2,%2,#1\n"		\
-		"	SETB [%0++],D1Ar1\n",		\
-		"	.long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-	__asm_copy_from_user_12x_cont(to, from, ret,	\
-		"	GETD D1Ar1,[%1++]\n"		\
-		"8:	SETD [%0++],D1Ar1\n" COPY,	\
-		"9:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
-		"	.long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
-	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
 #define __asm_copy_from_user_8x64(to, from, ret) \
 	asm volatile (				\
 		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
 		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"1:\n"					\
 		"	.section .fixup,\"ax\"\n"	\
-		"	MOV D1Ar1,#0\n"			\
-		"	MOV D0Ar2,#0\n"			\
 		"3:	ADD  %2,%2,#8\n"		\
-		"	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"	MOVT	D0Ar2,#HI(1b)\n"	\
 		"	JUMP	D0Ar2,#LO(1b)\n"	\
 		"	.previous\n"			\
@@ -789,35 +711,56 @@
  *
  * Rationale:
  * A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
  * Since we don't write to kernel buffer until we read first,
  * the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *       LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *       a fault happens at the 4th write, LSM_STEP will be 0
+ *       instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #8\n")
+		"LSR	D0Ar2, D0Ar2, #5\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
+		"SUB	%1, %1, D0Ar2\n")
 /* rewind 'from' pointer when a fault occurs
  *
  * Rationale:
  * A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
  * Since we don't write to kernel buffer until we read first,
  * the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *       LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *       a fault happens at the 4th write, LSM_STEP will be 0
+ *       instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
-		"SUB	%1, %1, #4\n")
+		"LSR	D0Ar2, D0Ar2, #6\n"				\
+		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
+		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
+		"SUB	%1, %1, D0Ar2\n")
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland. The return-value is the number of bytes that were
-   inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
 				  unsigned long n)
 {
 	register char *dst asm ("A0.2") = pdst;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_from_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_1(dst, src, retn);
 			n--;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_from_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_2(dst, src, retn);
 			n -= 2;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 
-	/* We only need one check after the unalignment-adjustments,
-	   because if both adjustments were done, either both or
-	   neither reference had an exception. */
-	if (retn != 0)
-		goto copy_exception_bytes;
-
 #ifdef USE_RAPF
 	/* 64 bit copy loop */
 	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 #endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		n -= 4;
 		if (retn)
-			goto copy_exception_bytes;
+			return retn + n;
 	}
 
 	/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	/* If we get here, retn correctly reflects the number of failing
 	   bytes. */
 	return retn;
-
- copy_exception_bytes:
-	/* We already have "retn" bytes cleared, and need to clear the
-	   remaining "n" bytes. A non-optimized simple byte-for-byte in-line
-	   memset is preferred here, since this isn't speed-critical code and
-	   we'd rather have this a leaf-function than calling memset. */
-	{
-		char *endp;
-		for (endp = dst + n; dst < endp; dst++)
-			*dst = 0;
-	}
-
-	return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
 	asm volatile (				\