Commit 3a5866a0 authored by Heiko Carstens

s390/fpu: provide and use vlm and vstm inline assemblies

Instead of open-coding vlm and vstm inline assemblies at several locations,
provide an fpu_* function for each instruction, and use them in the new
save_vx_regs() and load_vx_regs() helper functions.

Note that "O" and "R" inline assembly operand modifiers are used in order
to pass the displacement and base register of the memory operands to the
existing VLM and VSTM macros. The two operand modifiers are not available
for clang. Therefore provide two variants of each inline assembly.

The clang variant always uses and clobbers general purpose register 1, like
in the previous inline assemblies, so it can be used as base register with
a zero displacement. This generates slightly less efficient code, but can
be removed as soon as clang has support for the used operand modifiers.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent f4e3de75
......@@ -108,5 +108,75 @@ static __always_inline void fpu_stfpc(unsigned int *fpc)
: "memory");
}
#ifdef CONFIG_CC_IS_CLANG
/*
 * fpu_vlm() - load vector registers _v1.._v3 from the memory area _vxrs
 * using the VLM instruction.
 *
 * The anonymous struct gives the memory operand the exact byte size of
 * the loaded register range, so the compiler knows how much memory the
 * asm reads; instrument_read() reports that read to the instrumentation
 * framework, since accesses made from inline assembly are otherwise
 * invisible to it.
 *
 * clang variant: clang does not support the "O"/"R" operand modifiers
 * (see the GCC variant below), so the operand address is loaded into
 * general purpose register 1 (clobbered) and used as base register with
 * a zero displacement.
 */
#define fpu_vlm(_v1, _v3, _vxrs) do { \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
asm volatile("\n" \
" la 1,%[vxrs]\n" \
" VLM %[v1],%[v3],0,1\n" \
: \
: [vxrs] "R" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
: "memory", "1"); \
} while (0)
#else /* CONFIG_CC_IS_CLANG */
/*
 * GCC variant: the "%O"/"%R" operand modifiers emit the displacement and
 * base register of the "Q" memory operand directly into the VLM macro,
 * so no extra general purpose register is needed or clobbered.
 */
#define fpu_vlm(_v1, _v3, _vxrs) do { \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: \
: [vxrs] "Q" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
} while (0)
#endif /* CONFIG_CC_IS_CLANG */
#ifdef CONFIG_CC_IS_CLANG
/*
 * fpu_vstm() - store vector registers _v1.._v3 to the memory area _vxrs
 * using the VSTM instruction.
 *
 * The anonymous struct sizes the output memory operand to exactly the
 * stored register range; instrument_write() reports the write to the
 * instrumentation framework, since stores performed inside inline
 * assembly are otherwise invisible to it.
 *
 * clang variant: clang does not support the "O"/"R" operand modifiers
 * (see the GCC variant below), so the operand address is loaded into
 * general purpose register 1 (clobbered) and used as base register with
 * a zero displacement.
 */
#define fpu_vstm(_v1, _v3, _vxrs) do { \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
asm volatile("\n" \
" la 1,%[vxrs]\n" \
" VSTM %[v1],%[v3],0,1\n" \
: [vxrs] "=R" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory", "1"); \
} while (0)
#else /* CONFIG_CC_IS_CLANG */
/*
 * GCC variant: the "%O"/"%R" operand modifiers emit the displacement and
 * base register of the "Q" memory output operand directly into the VSTM
 * macro, so no extra general purpose register is needed or clobbered.
 */
#define fpu_vstm(_v1, _v3, _vxrs) do { \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: [vxrs] "=Q" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
} while (0)
#endif /* CONFIG_CC_IS_CLANG */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_FPU_INSN_H */
......@@ -84,6 +84,18 @@ void __load_fpu_regs(void);
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
/*
 * save_vx_regs() - store all 32 vector registers to the array at @vxrs.
 *
 * Two fpu_vstm() calls are needed since VSTM can store at most 16
 * registers per instruction (v0-v15 and v16-v31).
 * @vxrs must point to an array of at least 32 __vector128 entries.
 */
static __always_inline void save_vx_regs(__vector128 *vxrs)
{
fpu_vstm(0, 15, &vxrs[0]);
fpu_vstm(16, 31, &vxrs[16]);
}
/*
 * load_vx_regs() - load all 32 vector registers from the array at @vxrs.
 *
 * Two fpu_vlm() calls are needed since VLM can load at most 16 registers
 * per instruction (v0-v15 and v16-v31).
 * @vxrs must point to an array of at least 32 __vector128 entries.
 */
static __always_inline void load_vx_regs(__vector128 *vxrs)
{
fpu_vlm(0, 15, &vxrs[0]);
fpu_vlm(16, 31, &vxrs[16]);
}
static __always_inline void save_fp_regs(freg_t *fprs)
{
fpu_std(0, &fprs[0]);
......@@ -148,15 +160,6 @@ static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
preempt_enable();
}
/*
 * Legacy open-coded variant (removed by this commit in favor of the
 * fpu_vstm() based helper): stores all 32 vector registers to *vxrs.
 * The .word sequences are the hand-encoded machine code for
 * "vstm 0,15,0(1)" and "vstm 16,31,256(1)", usable with assemblers
 * that lack vector instruction support; general purpose register 1
 * holds the base address and is clobbered.
 */
static inline void save_vx_regs(__vector128 *vxrs)
{
asm volatile("\n"
" la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
: "=Q" (*(struct vx_array *)vxrs) : : "1");
}
static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
int i;
......
......@@ -137,16 +137,10 @@ void __load_fpu_regs(void)
void *regs = current->thread.fpu.regs;
fpu_lfpc_safe(&state->fpc);
if (likely(cpu_has_vx())) {
asm volatile("lgr 1,%0\n"
"VLM 0,15,0,1\n"
"VLM 16,31,256,1\n"
:
: "d" (regs)
: "1", "cc", "memory");
} else {
if (likely(cpu_has_vx()))
load_vx_regs(regs);
else
load_fp_regs(regs);
}
clear_cpu_flag(CIF_FPU);
}
......@@ -173,16 +167,10 @@ void save_fpu_regs(void)
regs = current->thread.fpu.regs;
fpu_stfpc(&state->fpc);
if (likely(cpu_has_vx())) {
asm volatile("lgr 1,%0\n"
"VSTM 0,15,0,1\n"
"VSTM 16,31,256,1\n"
:
: "d" (regs)
: "1", "cc", "memory");
} else {
if (likely(cpu_has_vx()))
save_vx_regs(regs);
else
save_fp_regs(regs);
}
set_cpu_flag(CIF_FPU);
out:
local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment