Commit ab01f4ac authored by Jakub Kicinski, committed by Daniel Borkmann

nfp: bpf: remember maps by ID

Record perf maps by map ID, not raw kernel pointer.  This helps
with debug messages, because printing pointers to logs is frowned
upon, and makes debug easier for the users, as map ID is something
they should be more familiar with.  Note that perf maps are offload
neutral, therefore IDs won't be orphaned.

While at it use a rate limited print helper for the error message.
Reported-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 09587627
...@@ -43,8 +43,6 @@ ...@@ -43,8 +43,6 @@
#include "fw.h" #include "fw.h"
#include "main.h" #include "main.h"
#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4) #define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf) static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
......
...@@ -3883,6 +3883,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) ...@@ -3883,6 +3883,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
struct nfp_insn_meta *meta1, *meta2; struct nfp_insn_meta *meta1, *meta2;
struct nfp_bpf_map *nfp_map; struct nfp_bpf_map *nfp_map;
struct bpf_map *map; struct bpf_map *map;
u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
if (meta1->skip || meta2->skip) if (meta1->skip || meta2->skip)
...@@ -3894,11 +3895,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) ...@@ -3894,11 +3895,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
map = (void *)(unsigned long)((u32)meta1->insn.imm | map = (void *)(unsigned long)((u32)meta1->insn.imm |
(u64)meta2->insn.imm << 32); (u64)meta2->insn.imm << 32);
if (bpf_map_offload_neutral(map)) if (bpf_map_offload_neutral(map)) {
continue; id = map->id;
} else {
nfp_map = map_to_offmap(map)->dev_priv; nfp_map = map_to_offmap(map)->dev_priv;
id = nfp_map->tid;
}
meta1->insn.imm = nfp_map->tid; meta1->insn.imm = id;
meta2->insn.imm = 0; meta2->insn.imm = 0;
} }
......
...@@ -45,8 +45,8 @@ ...@@ -45,8 +45,8 @@
const struct rhashtable_params nfp_bpf_maps_neutral_params = { const struct rhashtable_params nfp_bpf_maps_neutral_params = {
.nelem_hint = 4, .nelem_hint = 4,
.key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr), .key_len = FIELD_SIZEOF(struct bpf_map, id),
.key_offset = offsetof(struct nfp_bpf_neutral_map, ptr), .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id),
.head_offset = offsetof(struct nfp_bpf_neutral_map, l), .head_offset = offsetof(struct nfp_bpf_neutral_map, l),
.automatic_shrinking = true, .automatic_shrinking = true,
}; };
......
...@@ -47,6 +47,8 @@ ...@@ -47,6 +47,8 @@
#include "../nfp_asm.h" #include "../nfp_asm.h"
#include "fw.h" #include "fw.h"
#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
/* For relocation logic use up-most byte of branch instruction as scratch /* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW! * area. Remember to clear this before sending instructions to HW!
*/ */
...@@ -221,6 +223,7 @@ struct nfp_bpf_map { ...@@ -221,6 +223,7 @@ struct nfp_bpf_map {
struct nfp_bpf_neutral_map { struct nfp_bpf_neutral_map {
struct rhash_head l; struct rhash_head l;
struct bpf_map *ptr; struct bpf_map *ptr;
u32 map_id;
u32 count; u32 count;
}; };
......
...@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, ...@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
ASSERT_RTNL(); ASSERT_RTNL();
/* Reuse path - other offloaded program is already tracking this map. */ /* Reuse path - other offloaded program is already tracking this map. */
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map, record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params); nfp_bpf_maps_neutral_params);
if (record) { if (record) {
nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
...@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, ...@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
} }
record->ptr = map; record->ptr = map;
record->map_id = map->id;
record->count = 1; record->count = 1;
err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l, err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
...@@ -457,15 +458,17 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, ...@@ -457,15 +458,17 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
unsigned int len) unsigned int len)
{ {
struct cmsg_bpf_event *cbe = (void *)data; struct cmsg_bpf_event *cbe = (void *)data;
u32 pkt_size, data_size; struct nfp_bpf_neutral_map *record;
struct bpf_map *map; u32 pkt_size, data_size, map_id;
u64 map_id_full;
if (len < sizeof(struct cmsg_bpf_event)) if (len < sizeof(struct cmsg_bpf_event))
return -EINVAL; return -EINVAL;
pkt_size = be32_to_cpu(cbe->pkt_size); pkt_size = be32_to_cpu(cbe->pkt_size);
data_size = be32_to_cpu(cbe->data_size); data_size = be32_to_cpu(cbe->data_size);
map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr); map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL; return -EINVAL;
...@@ -473,15 +476,16 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, ...@@ -473,15 +476,16 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return -EINVAL; return -EINVAL;
rcu_read_lock(); rcu_read_lock();
if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map, record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
nfp_bpf_maps_neutral_params)) { nfp_bpf_maps_neutral_params);
if (!record || map_id_full > U32_MAX) {
rcu_read_unlock(); rcu_read_unlock();
pr_warn("perf event: dest map pointer %px not recognized, dropping event\n", cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
map); map_id_full, map_id_full);
return -EINVAL; return -EINVAL;
} }
bpf_event_output(map, be32_to_cpu(cbe->cpu_id), bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
&cbe->data[round_up(pkt_size, 4)], data_size, &cbe->data[round_up(pkt_size, 4)], data_size,
cbe->data, pkt_size, nfp_bpf_perf_event_copy); cbe->data, pkt_size, nfp_bpf_perf_event_copy);
rcu_read_unlock(); rcu_read_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment