Commit c2a658d4 authored by Andy Chiu's avatar Andy Chiu Committed by Palmer Dabbelt

riscv: lib: vectorize copy_to_user/copy_from_user

This patch utilizes Vector to perform copy_to_user/copy_from_user. If
Vector is available and the size of copy is large enough for Vector to
perform better than scalar, then direct the kernel to do Vector copies
for userspace. Though the best programming practice for users is to
reduce the copy, this provides a faster variant when copies are
inevitable.

The optimal size for using Vector, copy_to_user_thres, is only a
heuristic for now. We can add DT parsing if people feel the need to
customize it.

The exception fixup code of __asm_vector_usercopy must fall back to
the scalar one because accessing user pages might fault, and that
handling must be sleepable. The current kernel-mode Vector does not
allow tasks to be preemptible, so we must deactivate Vector and perform
a scalar fallback in such cases.

The original implementation of Vector operations comes from
https://github.com/sifive/sifive-libc, which we agree to contribute to
Linux kernel.
Co-developed-by: Jerry Shih <jerry.shih@sifive.com>
Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Co-developed-by: Nick Knight <nick.knight@sifive.com>
Signed-off-by: Nick Knight <nick.knight@sifive.com>
Suggested-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Link: https://lore.kernel.org/r/20240115055929.4736-6-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 7df56cbc
......@@ -525,6 +525,14 @@ config RISCV_ISA_V_DEFAULT_ENABLE
If you don't know what to do here, say Y.
config RISCV_ISA_V_UCOPY_THRESHOLD
int "Threshold size for vectorized user copies"
depends on RISCV_ISA_V
default 768
help
Prefer using vectorized copy_to_user()/copy_from_user() when the
workload size exceeds this value.
config TOOLCHAIN_HAS_ZBB
bool
default y
......
......@@ -11,6 +11,10 @@ long long __ashlti3(long long a, int b);
#ifdef CONFIG_RISCV_ISA_V
#ifdef CONFIG_MMU
asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n);
#endif /* CONFIG_MMU */
void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
const unsigned long *__restrict p2);
void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
......
......@@ -6,9 +6,13 @@ lib-y += memmove.o
lib-y += strcmp.o
lib-y += strlen.o
lib-y += strncmp.o
lib-$(CONFIG_MMU) += uaccess.o
ifeq ($(CONFIG_MMU), y)
lib-y += uaccess.o
lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
endif
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2023 SiFive
* Author: Andy Chiu <andy.chiu@sifive.com>
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/vector.h>
#include <asm/simd.h>
#ifdef CONFIG_MMU
#include <asm/asm-prototypes.h>
#endif
#ifdef CONFIG_MMU
/* Minimum copy size (bytes) before the vector path is attempted. */
size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;

int __asm_vector_usercopy(void *dst, void *src, size_t n);
int fallback_scalar_usercopy(void *dst, void *src, size_t n);

/*
 * Copy n bytes between dst and src (one side being user memory) using
 * vector instructions when the kernel-mode Vector context is usable,
 * falling back to the scalar copy routine otherwise or for any bytes
 * the vector copy could not complete (e.g. after a user-page fault).
 *
 * Returns the number of bytes left uncopied, as reported by the
 * scalar fallback, or 0 when the vector copy finished everything.
 */
asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
{
	size_t left;

	/* has_vector() was already checked by the asm entry point. */
	if (may_use_simd()) {
		kernel_vector_begin();
		left = __asm_vector_usercopy(dst, src, n);
		kernel_vector_end();

		if (!left)
			return 0;

		/* Resume with the scalar copy where the vector one stopped. */
		dst += n - left;
		src += n - left;
		n = left;
	}

	return fallback_scalar_usercopy(dst, src, n);
}
#endif
......@@ -3,6 +3,8 @@
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>
.macro fixup op reg addr lbl
100:
......@@ -11,6 +13,13 @@
.endm
SYM_FUNC_START(__asm_copy_to_user)
#ifdef CONFIG_RISCV_ISA_V
ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
REG_L t0, riscv_v_usercopy_threshold
bltu a2, t0, fallback_scalar_usercopy
tail enter_vector_usercopy
#endif
SYM_FUNC_START(fallback_scalar_usercopy)
/* Enable access to user memory */
li t6, SR_SUM
......@@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user)
sub a0, t5, a0
ret
SYM_FUNC_END(__asm_copy_to_user)
SYM_FUNC_END(fallback_scalar_usercopy)
EXPORT_SYMBOL(__asm_copy_to_user)
SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)
......
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
/* Register aliases for readability. */
#define pDst a0			/* destination pointer */
#define pSrc a1			/* source pointer */
#define iNum a2			/* bytes remaining to copy */
#define iVL a3			/* vector length chosen for this iteration */

#define ELEM_LMUL_SETTING m8	/* e8 elements grouped with LMUL=8 */
#define vData v0		/* data register group for the copy */

/*
 * Emit \op on \reg/\addr and record an exception-table entry so a
 * fault on this instruction resumes execution at \lbl.
 */
.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	_asm_extable 100b, \lbl
.endm
/*
 * int __asm_vector_usercopy(void *dst, void *src, size_t n)
 *
 * Copy n bytes from pSrc to pDst with vector loads/stores, chunked by
 * vsetvli. Returns (in a0) the number of bytes NOT copied, so the C
 * caller can redo the remainder with the scalar fallback; 0 means the
 * whole copy succeeded. Faulting accesses are redirected to the fixup
 * labels via the extable entries emitted by the fixup macro.
 */
SYM_FUNC_START(__asm_vector_usercopy)
	/* Enable access to user memory (set SR_SUM in CSR_STATUS). */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

loop:
	/* iVL = number of e8 elements processed this iteration. */
	vsetvli	iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
	fixup vle8.v vData, (pSrc), 10f
	sub	iNum, iNum, iVL
	add	pSrc, pSrc, iVL
	fixup vse8.v vData, (pDst), 11f
	add	pDst, pDst, iVL
	bnez	iNum, loop

	/*
	 * Exception fixup for the vector load is shared with the normal
	 * exit: a faulting vle8.v lands here before iNum was decremented,
	 * so iNum already holds the uncopied byte count.
	 */
10:
	/* Disable access to user memory. */
	csrc	CSR_STATUS, t6
	/* Return the number of bytes left uncopied. */
	mv	a0, iNum
	ret

	/* Exception fixup code for the vector store. */
11:
	/* Undo the subtraction done after vle8.v. */
	add	iNum, iNum, iVL
	/*
	 * Make sure the scalar fallback skips the elements already
	 * stored: vstart holds the element index at which vse8.v faulted.
	 */
	csrr	t2, CSR_VSTART
	sub	iNum, iNum, t2
	j	10b
SYM_FUNC_END(__asm_vector_usercopy)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment