Commit 91ff69e8 authored by Jiong Wang, committed by Alexei Starovoitov

nfp: bpf: support unaligned read offset

This patch adds support for unaligned read offsets, i.e. cases where the read
offset from the start of the packet cache area is not aligned to REG_WIDTH. In
this case, the read area might span a maximum of three transfer-in registers.
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent be759237
......@@ -553,6 +553,19 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
/* wrp_reg_or_subpart() - OR the low @field_len bytes of @src into @dst
 * starting at byte @offset; all other bytes of @dst are left unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	u8 byte_mask;
	enum shf_sc shift_class;

	/* Per-byte write-enable mask for the ld_field operation. */
	byte_mask = ((1 << field_len) - 1) << offset;
	/* Only shift when the destination bytes are not at the low end. */
	shift_class = offset ? SHF_SC_L_SHF : SHF_SC_NONE;

	emit_ld_field(nfp_prog, dst, byte_mask, src, shift_class,
		      32 - offset * 8);
}
static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
swreg *rega, swreg *regb)
......@@ -1864,6 +1877,60 @@ mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
off, xfer_num - 1, true, indir);
}
/* Load @size bytes for an LDX instruction out of the packet cache when the
 * read offset within the cached range is not aligned to REG_WIDTH.  The
 * source bytes may then straddle up to three consecutive transfer-in
 * registers; the result is assembled into the destination register pair
 * (dst_gpr holds the low half, dst_gpr + 1 the high half).
 */
static int
mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
				     struct nfp_insn_meta *meta,
				     unsigned int size)
{
	s16 range_start = meta->pkt_cache.range_start;
	/* Byte offset of this read from the start of the cached area. */
	s16 insn_off = meta->insn.off - range_start;
	swreg dst_lo, dst_hi, src_lo, src_mid;
	u8 dst_gpr = meta->insn.dst_reg * 2;
	/* len_lo/len_mid: byte counts taken from the first and second
	 * transfer registers; both refined below if the read crosses a
	 * register boundary.
	 */
	u8 len_lo = size, len_mid = 0;
	/* idx: first transfer register involved; off: byte offset into it. */
	u8 idx = insn_off / REG_WIDTH;
	u8 off = insn_off % REG_WIDTH;

	dst_hi = reg_both(dst_gpr + 1);
	dst_lo = reg_both(dst_gpr);
	src_lo = reg_xfer(idx);

	/* The read length could involve as many as three registers. */
	if (size > REG_WIDTH - off) {
		/* Calculate the part in the second register. */
		len_lo = REG_WIDTH - off;
		len_mid = size - len_lo;

		/* Calculate the part in the third register. */
		if (size > 2 * REG_WIDTH - off)
			len_mid = REG_WIDTH;
	}

	/* First chunk: low bytes of the result come from src_lo at @off. */
	wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);

	/* Whole read fits in one transfer register: zero the high half of
	 * the 64-bit destination and we are done.
	 */
	if (!len_mid) {
		wrp_immed(nfp_prog, dst_hi, 0);
		return 0;
	}

	src_mid = reg_xfer(idx + 1);

	if (size <= REG_WIDTH) {
		/* Two registers, but result still fits in dst_lo: OR the
		 * remaining bytes in above the first chunk.
		 */
		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
		wrp_immed(nfp_prog, dst_hi, 0);
	} else {
		swreg src_hi = reg_xfer(idx + 2);

		/* Result spans the dst pair: finish dst_lo from src_mid,
		 * then build dst_hi from the rest of src_mid plus src_hi.
		 */
		wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
				   REG_WIDTH - len_lo, len_lo);
		wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
				REG_WIDTH - len_lo);
		wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
				   len_lo);
	}

	return 0;
}
static int
mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
......@@ -1900,10 +1967,10 @@ mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
{
u8 off = meta->insn.off - meta->pkt_cache.range_start;
if (WARN_ON_ONCE(!IS_ALIGNED(off, REG_WIDTH)))
return -EOPNOTSUPP;
if (IS_ALIGNED(off, REG_WIDTH))
return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
}
static int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment