Commit 64bf9657 authored by Javier Honduvilla Coto, committed by yonghong-song

Update scripts to use `increment` (#1905)

* Replace the increment boilerplate with a call to `increment` from the new
tooling. Found cases to replace using `ripgrep`[1]:
```
$ rg '\(\*\w+\)\s*\+\+' -l | grep tools | grep -v old
```

[1]: https://github.com/BurntSushi/ripgrep

* Replace the boilerplate for increments bigger than 1 with the new
`increment` call. Found cases to replace using `ripgrep`[1]:
```
$ rg '\(\*\w+\)\s*\+=' -l | grep tools | grep -v old
```

[1]: https://github.com/BurntSushi/ripgrep

* Update examples indicating the alternative increment call for hash tables
parent 7c4c5b9f
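
For context, the before/after pattern applied throughout this change looks roughly like the sketch below. It is not one of the files touched by this commit; the probed syscall (`sync`), the map name `counts`, and the key choice are illustrative placeholders. The point is the contrast between the old `lookup_or_init` boilerplate and the `increment` helper, whose optional second argument is the step to add.

```
#!/usr/bin/env python
# Illustrative sketch only (hypothetical probe and map names): contrasts the
# old lookup_or_init boilerplate with the newer map.increment() helper.
# Requires bcc and root privileges.
from bcc import BPF
from time import sleep

prog = """
BPF_HASH(counts, u32, u64);

int do_count(struct pt_regs *ctx) {
    u32 key = bpf_get_current_pid_tgid() >> 32;   // use the TGID as the key

    // old boilerplate:
    //   u64 zero = 0, *val;
    //   val = counts.lookup_or_init(&key, &zero);
    //   (*val)++;                  // or (*val) += step;

    // new helper; the second (step) argument is optional and defaults to 1
    counts.increment(key);
    return 0;
}
"""

b = BPF(text=prog)
b.attach_kprobe(event=b.get_syscall_fnname("sync"), fn_name="do_count")
sleep(5)
for k, v in sorted(b["counts"].items(), key=lambda kv: kv[1].value):
    print("PID %d called sync() %d times" % (k.value, v.value))
```

Running it as root while `sync` is invoked in another shell should accumulate one count per calling process, exactly as the tools below do with their own keys and maps.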
@@ -557,16 +557,19 @@ struct key_t {
 BPF_HASH(counts, struct key_t);

 int count(struct pt_regs *ctx) {
     if (!PT_REGS_PARM1(ctx))
         return 0;
     struct key_t key = {};
     u64 zero = 0, *val;
     bpf_probe_read(&key.c, sizeof(key.c), (void *)PT_REGS_PARM1(ctx));
-    val = counts.lookup_or_init(&key, &zero);
-    (*val)++;
-    return 0;
+    // another possibility is using `counts.increment(key);`. It allows a second
+    // optional parameter to specify the increment step
+    val = counts.lookup_or_init(&key, &zero);
+    (*val)++;
+    return 0;
 };
 """)

 b.attach_uprobe(name="c", sym="strlen", fn_name="count")
@@ -681,6 +684,8 @@ int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
     key.curr_pid = bpf_get_current_pid_tgid();
     key.prev_pid = prev->pid;

+    // another possibility is using `counts.increment(key);`. It allows a second
+    // optional parameter to specify the increment step
     val = stats.lookup_or_init(&key, &zero);
     (*val)++;
     return 0;
...
@@ -33,6 +33,7 @@ int alloc_enter(struct pt_regs *ctx, size_t size) {
     if (key < 0)
         return 0;

+    // could also use `calls.increment(key, size);`
     u64 zero = 0, *val;
     val = calls.lookup_or_init(&key, &zero);
     (*val) += size;
...
@@ -31,6 +31,7 @@ int count(struct pt_regs *ctx) {
     u64 zero = 0, *val;

     bpf_probe_read(&key.c, sizeof(key.c), (void *)PT_REGS_PARM1(ctx));
+    // could also use `counts.increment(key)`
     val = counts.lookup_or_init(&key, &zero);
     (*val)++;
     return 0;
...
@@ -22,6 +22,7 @@ int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
     key.curr_pid = bpf_get_current_pid_tgid();
     key.prev_pid = prev->pid;

+    // could also use `stats.increment(key);`
     val = stats.lookup_or_init(&key, &zero);
     (*val)++;
     return 0;
...
@@ -94,12 +94,10 @@ BPF_HASH(counts, struct key_t);
 int do_count(struct pt_regs *ctx) {
     struct key_t key = {};
-    u64 zero = 0, *val;
     u64 ip;

     key.ip = PT_REGS_IP(ctx);
-    val = counts.lookup_or_init(&key, &zero);  // update counter
-    (*val)++;
+    counts.increment(key);  // update counter
     return 0;
 }
...
@@ -153,7 +153,6 @@ def handle_loop(stdscr, args):
 int do_count(struct pt_regs *ctx) {
     struct key_t key = {};
-    u64 zero = 0 , *val;
     u64 pid = bpf_get_current_pid_tgid();
     u32 uid = bpf_get_current_uid_gid();
@@ -162,8 +161,7 @@ def handle_loop(stdscr, args):
     key.uid = uid & 0xFFFFFFFF;
     bpf_get_current_comm(&(key.comm), 16);
-    val = counts.lookup_or_init(&key, &zero);  // update counter
-    (*val)++;
+    counts.increment(key);
     return 0;
 }
...
@@ -84,8 +84,7 @@ int count_only(struct pt_regs *ctx, struct irq_desc *desc)
     irq_key_t key = {.slot = 0 /* ignore */};
     bpf_probe_read(&key.name, sizeof(key.name), name);
-    u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero);
-    (*vp)++;
+    dist.increment(key);
     return 0;
 }
@@ -136,8 +135,7 @@ else:
     bpf_text = bpf_text.replace('STORE',
         'irq_key_t key = {.slot = 0 /* ignore */};' +
         'bpf_probe_read(&key.name, sizeof(key.name), name);' +
-        'u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero);' +
-        '(*vp) += delta;')
+        'dist.increment(key, delta);')
 if debug or args.ebpf:
     print(bpf_text)
     if args.ebpf:
...
@@ -58,9 +58,7 @@ int on_cache_miss(struct bpf_perf_event_data *ctx) {
     struct key_t key = {};
     get_key(&key);

-    u64 zero = 0, *val;
-    val = miss_count.lookup_or_init(&key, &zero);
-    (*val) += ctx->sample_period;
+    miss_count.increment(key, ctx->sample_period);

     return 0;
 }
@@ -69,9 +67,7 @@ int on_cache_ref(struct bpf_perf_event_data *ctx) {
     struct key_t key = {};
     get_key(&key);

-    u64 zero = 0, *val;
-    val = ref_count.lookup_or_init(&key, &zero);
-    (*val) += ctx->sample_period;
+    ref_count.increment(key, ctx->sample_period);

     return 0;
 }
...
@@ -156,7 +156,6 @@ int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
     }

     // create map key
-    u64 zero = 0, *val;
     struct key_t key = {};
     key.pid = pid;
@@ -165,8 +164,7 @@ int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
     key.kernel_stack_id = KERNEL_STACK_GET;
     bpf_get_current_comm(&key.name, sizeof(key.name));

-    val = counts.lookup_or_init(&key, &zero);
-    (*val) += delta;
+    counts.increment(key, delta);
     return 0;
 }
 """
...
@@ -193,7 +193,6 @@ int oncpu(struct pt_regs *ctx, struct task_struct *p) {
     }

     // create map key
-    u64 zero = 0, *val;
     struct key_t key = {};
     struct wokeby_t *woke;
@@ -213,8 +212,7 @@ int oncpu(struct pt_regs *ctx, struct task_struct *p) {
         wokeby.delete(&pid);
     }

-    val = counts.lookup_or_init(&key, &zero);
-    (*val) += delta;
+    counts.increment(key, delta);
     return 0;
 }
 """
@@ -368,7 +366,7 @@ for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
     # print waker/wakee delimiter
     print(" %-16s %s" % ("--", "--"))

     if not args.user_stacks_only:
         if stack_id_err(k.t_k_stack_id):
             print(" [Missed Kernel Stack]")
...
@@ -145,7 +145,6 @@ int do_perf_event(struct bpf_perf_event_data *ctx) {
         return 0;

     // create map key
-    u64 zero = 0, *val;
     struct key_t key = {.pid = pid};
     bpf_get_current_comm(&key.name, sizeof(key.name));
@@ -180,8 +179,7 @@ int do_perf_event(struct bpf_perf_event_data *ctx) {
         }
     }

-    val = counts.lookup_or_init(&key, &zero);
-    (*val)++;
+    counts.increment(key);
     return 0;
 }
 """
...
@@ -110,8 +110,7 @@ if args.dist:
 else:
     bpf_text = bpf_text.replace('STORE',
         'key.vec = valp->vec; ' +
-        'u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero); ' +
-        '(*vp) += delta;')
+        'dist.increment(key, delta);')
 if debug or args.ebpf:
     print(bpf_text)
     if args.ebpf:
...
@@ -128,9 +128,7 @@ int trace_count(void *ctx) {
     key.tgid = GET_TGID;
     STORE_COMM
     %s
-    u64 zero = 0;
-    u64 *val = counts.lookup_or_init(&key, &zero);
-    (*val)++;
+    counts.increment(key);
     return 0;
 }
 """
...
@@ -131,8 +131,8 @@ int trace_tlp(struct pt_regs *ctx, struct sock *sk)
 }
 """

 struct_init = { 'ipv4':
         { 'count' :
             """
             struct ipv4_flow_key_t flow_key = {};
             flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
@@ -140,7 +140,7 @@ struct_init = { 'ipv4':
             // lport is host order
             flow_key.lport = lport;
             flow_key.dport = ntohs(dport);""",
         'trace' :
             """
             struct ipv4_data_t data4 = {.pid = pid, .ip = 4, .type = type};
             data4.saddr = skp->__sk_common.skc_rcv_saddr;
@@ -150,7 +150,7 @@ struct_init = { 'ipv4':
             data4.dport = ntohs(dport);
             data4.state = state; """
         },
     'ipv6':
         { 'count' :
             """
             struct ipv6_flow_key_t flow_key = {};
@@ -175,10 +175,8 @@ struct_init = { 'ipv4':
 }

 count_core_base = """
-        u64 zero = 0, *val;
-        val = COUNT_STRUCT.lookup_or_init(&flow_key, &zero);
-        (*val)++;
+        COUNT_STRUCT.increment(flow_key);
 """

 if args.count:
     bpf_text = bpf_text.replace("IPV4_INIT", struct_init['ipv4']['count'])
@@ -266,7 +264,7 @@ def depict_cnt(counts_tab, l3prot='ipv4'):
     ep_fmt = "[%s]#%d"
     if l3prot == 'ipv4':
         depict_key = "%-20s <-> %-20s" % (ep_fmt % (inet_ntop(AF_INET, pack('I', k.saddr)), k.lport),
             ep_fmt % (inet_ntop(AF_INET, pack('I', k.daddr)), k.dport))
     else:
         depict_key = "%-20s <-> %-20s" % (ep_fmt % (inet_ntop(AF_INET6, k.saddr), k.lport),
             ep_fmt % (inet_ntop(AF_INET6, k.daddr), k.dport))
@@ -290,7 +288,7 @@ if args.count:
     # header
     print("\n%-25s %-25s %-10s" % (
         "LADDR:LPORT", "RADDR:RPORT", "RETRANSMITS"))
     depict_cnt(b.get_table("ipv4_count"))
     depict_cnt(b.get_table("ipv6_count"), l3prot='ipv6')
 # read events
 else:
...
@@ -114,7 +114,6 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
     struct msghdr *msg, size_t size)
 {
     u16 family = sk->__sk_common.skc_family;
-    u64 *val, zero = 0;

     if (family == AF_INET) {
         u32 dst = sk->__sk_common.skc_daddr;
@@ -176,9 +175,8 @@ def generate_bpf_subnets(subnets):
     if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
         (dst & __NET_MASK__)) {
         struct index_key_t key = {.index = __POS__};
-        val = ipv4_send_bytes.lookup_or_init(&key, &zero);
+        ipv4_send_bytes.increment(key, size);
         categorized = 1;
-        (*val) += size;
     }
 """
     bpf = ''
...
@@ -104,7 +104,6 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
     u32 pid = bpf_get_current_pid_tgid();
     FILTER
     u16 dport = 0, family = sk->__sk_common.skc_family;
-    u64 *val, zero = 0;

     if (family == AF_INET) {
         struct ipv4_key_t ipv4_key = {.pid = pid};
@@ -113,8 +112,7 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
         ipv4_key.lport = sk->__sk_common.skc_num;
         dport = sk->__sk_common.skc_dport;
         ipv4_key.dport = ntohs(dport);
-        val = ipv4_send_bytes.lookup_or_init(&ipv4_key, &zero);
-        (*val) += size;
+        ipv4_send_bytes.increment(ipv4_key, size);
     } else if (family == AF_INET6) {
         struct ipv6_key_t ipv6_key = {.pid = pid};
@@ -126,8 +124,8 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
         ipv6_key.lport = sk->__sk_common.skc_num;
         dport = sk->__sk_common.skc_dport;
         ipv6_key.dport = ntohs(dport);
-        val = ipv6_send_bytes.lookup_or_init(&ipv6_key, &zero);
-        (*val) += size;
+        ipv6_send_bytes.increment(ipv6_key, size);
     }
     // else drop
@@ -157,8 +155,8 @@ int kprobe__tcp_cleanup_rbuf(struct pt_regs *ctx, struct sock *sk, int copied)
         ipv4_key.lport = sk->__sk_common.skc_num;
         dport = sk->__sk_common.skc_dport;
         ipv4_key.dport = ntohs(dport);
-        val = ipv4_recv_bytes.lookup_or_init(&ipv4_key, &zero);
-        (*val) += copied;
+        ipv4_recv_bytes.increment(ipv4_key, copied);
     } else if (family == AF_INET6) {
         struct ipv6_key_t ipv6_key = {.pid = pid};
@@ -169,8 +167,7 @@ int kprobe__tcp_cleanup_rbuf(struct pt_regs *ctx, struct sock *sk, int copied)
         ipv6_key.lport = sk->__sk_common.skc_num;
         dport = sk->__sk_common.skc_dport;
         ipv6_key.dport = ntohs(dport);
-        val = ipv6_recv_bytes.lookup_or_init(&ipv6_key, &zero);
-        (*val) += copied;
+        ipv6_recv_bytes.increment(ipv6_key, copied);
     }
     // else drop
...
@@ -27,10 +27,8 @@ BPF_HASH(counts, struct key_t, u64, 256);
 int do_count(struct pt_regs *ctx) {
     struct key_t key = {};
-    u64 zero = 0, *val;
     key.ip = PT_REGS_IP(ctx);
-    val = counts.lookup_or_init(&key, &zero);
-    (*val)++;
+    counts.increment(key);
     return 0;
 }
 """)
...
@@ -134,14 +134,12 @@ int waker(struct pt_regs *ctx, struct task_struct *p) {
         return 0;

     struct key_t key = {};
-    u64 zero = 0, *val;
     key.w_k_stack_id = stack_traces.get_stackid(ctx, BPF_F_REUSE_STACKID);
     bpf_probe_read(&key.target, sizeof(key.target), p->comm);
     bpf_get_current_comm(&key.waker, sizeof(key.waker));

-    val = counts.lookup_or_init(&key, &zero);
-    (*val) += delta;
+    counts.increment(key, delta);
     return 0;
 }
 """
...