Commit 9f16c8ab authored by Jakub Kicinski, committed by David S. Miller

nfp: bpf: optimize mov64 a little

Loading a 64-bit constant requires up to 4 load immediates, since
we can only load 16 bits at a time.  If the two 32-bit halves of
the 64-bit constant are equal, however, we can save a cycle by
doing a single register move instead of two 16-bit loads for the
upper half.
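
As a rough standalone sketch of that cost model (not driver code;
the assumption, taken from the constraint above, is that a 32-bit
half costs one load immediate when either 16-bit chunk is zero and
two otherwise):

        #include <stdint.h>
        #include <stdio.h>

        /* both 16-bit chunks non-zero -> two load immediates */
        static int imm32_cost(uint32_t v)
        {
                return ((v & 0xffff) && (v >> 16)) ? 2 : 1;
        }

        static int imm64_cost(uint64_t v)
        {
                uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);

                /* equal halves: one mov replaces the upper-half loads */
                if (hi == lo)
                        return imm32_cost(lo) + 1;
                return imm32_cost(lo) + imm32_cost(hi);
        }

        int main(void)
        {
                /* equal halves: 2 loads + 1 mov = 3 instead of 4 */
                printf("%d\n", imm64_cost(0xdeadbeefdeadbeefULL));
                /* distinct halves: up to 4 load immediates */
                printf("%d\n", imm64_cost(0x0123456789abcdefULL));
                return 0;
        }

For a constant like 0xdeadbeefdeadbeef this gives 3 instructions
instead of 4; when the low half already costs only one instruction,
the mov is no worse than a second load.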

Note that we don't optimize the normal ALU64 immediate load, because
even though it writes a 64-bit register, the upper half comes from
sign extension, so we can load it in one cycle anyway.
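
That sign-extension point is easy to check in isolation (again a
sketch, not driver code): the upper word of a sign-extended 32-bit
immediate is always 0x00000000 or 0xffffffff, so a single load
immediate covers it.

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                int32_t imms[] = { -5, 5, INT32_MIN };

                for (unsigned i = 0; i < sizeof(imms) / sizeof(imms[0]); i++) {
                        /* what an ALU64 mov with a 32-bit imm puts in the register */
                        uint64_t v = (uint64_t)(int64_t)imms[i];
                        uint32_t hi = (uint32_t)(v >> 32);

                        /* only two possible upper halves after sign extension */
                        assert(hi == 0 || hi == 0xffffffff);
                }
                return 0;
        }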
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b14157ee
@@ -1384,19 +1384,28 @@ static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-        wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
-                  meta->insn.imm);
+        struct nfp_insn_meta *prev = nfp_meta_prev(meta);
+        u32 imm_lo, imm_hi;
+        u8 dst;
+
+        dst = prev->insn.dst_reg * 2;
+        imm_lo = prev->insn.imm;
+        imm_hi = meta->insn.imm;
+
+        wrp_immed(nfp_prog, reg_both(dst), imm_lo);
+
+        /* mov is always 1 insn, load imm may be two, so try to use mov */
+        if (imm_hi == imm_lo)
+                wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
+        else
+                wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
 
         return 0;
 }
 
 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-        const struct bpf_insn *insn = &meta->insn;
-
         meta->double_cb = imm_ld8_part2;
-        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
         return 0;
 }