Commit 3cd64083 authored by Paul Burton

MIPS: ptrace: introduce NT_MIPS_MSA regset

The current methods for obtaining FP context via ptrace only provide
either 32 or 64 bits per data register. With MSA, where vector registers
are aliased with scalar FP data registers, those registers are 128 bits
wide. Thus a new mechanism is required for userland to access those
registers via ptrace. This patch introduces an NT_MIPS_MSA regset which
provides, in this order:

  - The full 128-bit value of each vector register, in native
    endianness, saved as though its elements are doubles. That is, the
    format of each vector register is as would be obtained by saving it
    to memory using an st.d instruction.

  - The 32-bit scalar FP implementation register (FIR).

  - The 32-bit scalar FP control & status register (FCSR).

  - The 32-bit MSA implementation register (MSAIR).

  - The 32-bit MSA control & status register (MSACSR).

Providing the FIR & FCSR registers in addition to their MSA
equivalents allows scalar FP context to be retrieved as a subset of
the context available via this regset. Together the four control
registers also neatly form the final 128-bit "register" of the regset
(see the sketch below).
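
As a rough usage sketch (not part of this patch), a tracer could fetch
the whole regset with a single PTRACE_GETREGSET request. The
struct mips_msa_regs type and read_msa_regs() helper below are
hypothetical, written only to mirror the 33 x 16 byte layout described
above:

  #include <stdint.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>

  #ifndef NT_MIPS_MSA
  #define NT_MIPS_MSA 0x802  /* added by this patch; older headers may lack it */
  #endif

  /* Hypothetical userland mirror of the regset layout: 32 x 128-bit
   * vector registers followed by one 128-bit block of control
   * registers (528 bytes in total). */
  struct mips_msa_regs {
      uint64_t vregs[32][2]; /* each vector register as two 64-bit halves */
      uint32_t fir;          /* scalar FP implementation register */
      uint32_t fcsr;         /* scalar FP control & status register */
      uint32_t msair;        /* MSA implementation register */
      uint32_t msacsr;       /* MSA control & status register */
  };

  static long read_msa_regs(pid_t pid, struct mips_msa_regs *regs)
  {
      struct iovec iov = {
          .iov_base = regs,
          .iov_len  = sizeof(*regs),
      };

      /* PTRACE_GETREGSET selects the regset by its ELF note type. */
      return ptrace(PTRACE_GETREGSET, pid, (void *)NT_MIPS_MSA, &iov);
  }

The tracee must of course be stopped (e.g. after PTRACE_ATTACH and
waitpid()) before the request is issued.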
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/21180/
Cc: linux-mips@linux-mips.org
parent bec0de4c
@@ -622,6 +622,130 @@ static int fp_mode_set(struct task_struct *target,
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA
struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};
static int copy_pad_fprs(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int *ppos, unsigned int *pcount,
			 void **pkbuf, void __user **pubuf,
			 unsigned int live_sz)
{
	int i, j, start, start_pad, err;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	i = start = err = 0;
	for (; i < NUM_FPU_REGS; i++, start += regset->size) {
		err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
					   &target->thread.fpu.fpr[i],
					   start, start + cp_sz);

		start_pad = start + cp_sz;
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
						   &fill, start_pad,
						   start_pad + sizeof(fill));
			start_pad += sizeof(fill);
		}
	}

	return err;
}
static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};
	int err;

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		err = copy_pad_fprs(target, regset, &pos, &count,
				    &kbuf, &ubuf, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		err = copy_pad_fprs(target, regset, &pos, &count,
				    &kbuf, &ubuf, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.fpr,
					  0, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		err = copy_pad_fprs(target, regset, &pos, &count,
				    &kbuf, &ubuf,
				    sizeof(target->thread.fpu.fpr[0]));
	}

	err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &ctrl_regs, wr_size,
				   wr_size + sizeof(ctrl_regs));

	return err;
}
static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));

	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}
#endif /* CONFIG_CPU_HAS_MSA */
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
@@ -798,6 +922,9 @@ enum mips_regset {
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};
struct pt_regs_offset {
@@ -922,6 +1049,16 @@ static const struct user_regset mips_regsets[] = {
		.set = fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.get = msa_get,
		.set = msa_set,
	},
#endif
};
static const struct user_regset_view user_mips_view = {
@@ -972,6 +1109,16 @@ static const struct user_regset mips64_regsets[] = {
		.set = fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.get = msa_get,
		.set = msa_set,
	},
#endif
};
static const struct user_regset_view user_mips64_view = {
......
@@ -424,6 +424,7 @@ typedef struct elf64_shdr {
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
......