Commit 64bf9657 authored by Javier Honduvilla Coto's avatar Javier Honduvilla Coto Committed by yonghong-song

Update scripts to use `increment` (#1905)

* Replace boilerplate for increment with a call to `increment`

from new tooling. Found cases to replace using `ripgrep`[1]:
```
$ rg '\(\*\w+\)\s*\+\+' -l | grep tools | grep -v old
```

[1]: https://github.com/BurntSushi/ripgrep

* Replace boilerplate for bigger than 1 increments with the new
`increment` call

from new tooling. Found cases to replace using `ripgrep`[1]:
```
$ rg '\(\*\w+\)\s*\+=' -l | grep tools | grep -v old
```

[1]: https://github.com/BurntSushi/ripgrep

* Update examples indicating the alternative increment call for hash tables
parent 7c4c5b9f
......@@ -557,16 +557,19 @@ struct key_t {
BPF_HASH(counts, struct key_t);
int count(struct pt_regs *ctx) {
if (!PT_REGS_PARM1(ctx))
return 0;
if (!PT_REGS_PARM1(ctx))
return 0;
struct key_t key = {};
u64 zero = 0, *val;
struct key_t key = {};
u64 zero = 0, *val;
bpf_probe_read(&key.c, sizeof(key.c), (void *)PT_REGS_PARM1(ctx));
val = counts.lookup_or_init(&key, &zero);
(*val)++;
return 0;
bpf_probe_read(&key.c, sizeof(key.c), (void *)PT_REGS_PARM1(ctx));
// another possibility is using `counts.increment(key);`. It allows a second
// optional parameter to specify the increment step
val = counts.lookup_or_init(&key, &zero);
(*val)++;
return 0;
};
""")
b.attach_uprobe(name="c", sym="strlen", fn_name="count")
......@@ -681,6 +684,8 @@ int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
key.curr_pid = bpf_get_current_pid_tgid();
key.prev_pid = prev->pid;
// another possibility is using `counts.increment(key);`. It allows a second
// optional parameter to specify the increment step
val = stats.lookup_or_init(&key, &zero);
(*val)++;
return 0;
......
......@@ -33,6 +33,7 @@ int alloc_enter(struct pt_regs *ctx, size_t size) {
if (key < 0)
return 0;
// could also use `calls.increment(key, size);`
u64 zero = 0, *val;
val = calls.lookup_or_init(&key, &zero);
(*val) += size;
......
......@@ -31,6 +31,7 @@ int count(struct pt_regs *ctx) {
u64 zero = 0, *val;
bpf_probe_read(&key.c, sizeof(key.c), (void *)PT_REGS_PARM1(ctx));
// could also use `counts.increment(key)`
val = counts.lookup_or_init(&key, &zero);
(*val)++;
return 0;
......
......@@ -22,6 +22,7 @@ int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
key.curr_pid = bpf_get_current_pid_tgid();
key.prev_pid = prev->pid;
// could also use `stats.increment(key);`
val = stats.lookup_or_init(&key, &zero);
(*val)++;
return 0;
......
......@@ -94,12 +94,10 @@ BPF_HASH(counts, struct key_t);
int do_count(struct pt_regs *ctx) {
struct key_t key = {};
u64 zero = 0, *val;
u64 ip;
key.ip = PT_REGS_IP(ctx);
val = counts.lookup_or_init(&key, &zero); // update counter
(*val)++;
counts.increment(key); // update counter
return 0;
}
......
......@@ -153,7 +153,6 @@ def handle_loop(stdscr, args):
int do_count(struct pt_regs *ctx) {
struct key_t key = {};
u64 zero = 0 , *val;
u64 pid = bpf_get_current_pid_tgid();
u32 uid = bpf_get_current_uid_gid();
......@@ -162,8 +161,7 @@ def handle_loop(stdscr, args):
key.uid = uid & 0xFFFFFFFF;
bpf_get_current_comm(&(key.comm), 16);
val = counts.lookup_or_init(&key, &zero); // update counter
(*val)++;
counts.increment(key);
return 0;
}
......
......@@ -84,8 +84,7 @@ int count_only(struct pt_regs *ctx, struct irq_desc *desc)
irq_key_t key = {.slot = 0 /* ignore */};
bpf_probe_read(&key.name, sizeof(key.name), name);
u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero);
(*vp)++;
dist.increment(key);
return 0;
}
......@@ -136,8 +135,7 @@ else:
bpf_text = bpf_text.replace('STORE',
'irq_key_t key = {.slot = 0 /* ignore */};' +
'bpf_probe_read(&key.name, sizeof(key.name), name);' +
'u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero);' +
'(*vp) += delta;')
'dist.increment(key, delta);')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
......
......@@ -58,9 +58,7 @@ int on_cache_miss(struct bpf_perf_event_data *ctx) {
struct key_t key = {};
get_key(&key);
u64 zero = 0, *val;
val = miss_count.lookup_or_init(&key, &zero);
(*val) += ctx->sample_period;
miss_count.increment(key, ctx->sample_period);
return 0;
}
......@@ -69,9 +67,7 @@ int on_cache_ref(struct bpf_perf_event_data *ctx) {
struct key_t key = {};
get_key(&key);
u64 zero = 0, *val;
val = ref_count.lookup_or_init(&key, &zero);
(*val) += ctx->sample_period;
ref_count.increment(key, ctx->sample_period);
return 0;
}
......
......@@ -156,7 +156,6 @@ int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
}
// create map key
u64 zero = 0, *val;
struct key_t key = {};
key.pid = pid;
......@@ -165,8 +164,7 @@ int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
key.kernel_stack_id = KERNEL_STACK_GET;
bpf_get_current_comm(&key.name, sizeof(key.name));
val = counts.lookup_or_init(&key, &zero);
(*val) += delta;
counts.increment(key, delta);
return 0;
}
"""
......
......@@ -193,7 +193,6 @@ int oncpu(struct pt_regs *ctx, struct task_struct *p) {
}
// create map key
u64 zero = 0, *val;
struct key_t key = {};
struct wokeby_t *woke;
......@@ -213,8 +212,7 @@ int oncpu(struct pt_regs *ctx, struct task_struct *p) {
wokeby.delete(&pid);
}
val = counts.lookup_or_init(&key, &zero);
(*val) += delta;
counts.increment(key, delta);
return 0;
}
"""
......@@ -368,7 +366,7 @@ for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
# print waker/wakee delimiter
print(" %-16s %s" % ("--", "--"))
if not args.user_stacks_only:
if stack_id_err(k.t_k_stack_id):
print(" [Missed Kernel Stack]")
......
......@@ -145,7 +145,6 @@ int do_perf_event(struct bpf_perf_event_data *ctx) {
return 0;
// create map key
u64 zero = 0, *val;
struct key_t key = {.pid = pid};
bpf_get_current_comm(&key.name, sizeof(key.name));
......@@ -180,8 +179,7 @@ int do_perf_event(struct bpf_perf_event_data *ctx) {
}
}
val = counts.lookup_or_init(&key, &zero);
(*val)++;
counts.increment(key);
return 0;
}
"""
......
......@@ -110,8 +110,7 @@ if args.dist:
else:
bpf_text = bpf_text.replace('STORE',
'key.vec = valp->vec; ' +
'u64 zero = 0, *vp = dist.lookup_or_init(&key, &zero); ' +
'(*vp) += delta;')
'dist.increment(key, delta);')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
......
......@@ -128,9 +128,7 @@ int trace_count(void *ctx) {
key.tgid = GET_TGID;
STORE_COMM
%s
u64 zero = 0;
u64 *val = counts.lookup_or_init(&key, &zero);
(*val)++;
counts.increment(key);
return 0;
}
"""
......
......@@ -131,8 +131,8 @@ int trace_tlp(struct pt_regs *ctx, struct sock *sk)
}
"""
struct_init = { 'ipv4':
{ 'count' :
struct_init = { 'ipv4':
{ 'count' :
"""
struct ipv4_flow_key_t flow_key = {};
flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
......@@ -140,7 +140,7 @@ struct_init = { 'ipv4':
// lport is host order
flow_key.lport = lport;
flow_key.dport = ntohs(dport);""",
'trace' :
'trace' :
"""
struct ipv4_data_t data4 = {.pid = pid, .ip = 4, .type = type};
data4.saddr = skp->__sk_common.skc_rcv_saddr;
......@@ -150,7 +150,7 @@ struct_init = { 'ipv4':
data4.dport = ntohs(dport);
data4.state = state; """
},
'ipv6':
'ipv6':
{ 'count' :
"""
struct ipv6_flow_key_t flow_key = {};
......@@ -175,10 +175,8 @@ struct_init = { 'ipv4':
}
count_core_base = """
u64 zero = 0, *val;
val = COUNT_STRUCT.lookup_or_init(&flow_key, &zero);
(*val)++;
"""
COUNT_STRUCT.increment(flow_key);
"""
if args.count:
bpf_text = bpf_text.replace("IPV4_INIT", struct_init['ipv4']['count'])
......@@ -266,7 +264,7 @@ def depict_cnt(counts_tab, l3prot='ipv4'):
ep_fmt = "[%s]#%d"
if l3prot == 'ipv4':
depict_key = "%-20s <-> %-20s" % (ep_fmt % (inet_ntop(AF_INET, pack('I', k.saddr)), k.lport),
ep_fmt % (inet_ntop(AF_INET, pack('I', k.daddr)), k.dport))
ep_fmt % (inet_ntop(AF_INET, pack('I', k.daddr)), k.dport))
else:
depict_key = "%-20s <-> %-20s" % (ep_fmt % (inet_ntop(AF_INET6, k.saddr), k.lport),
ep_fmt % (inet_ntop(AF_INET6, k.daddr), k.dport))
......@@ -290,7 +288,7 @@ if args.count:
# header
print("\n%-25s %-25s %-10s" % (
"LADDR:LPORT", "RADDR:RPORT", "RETRANSMITS"))
depict_cnt(b.get_table("ipv4_count"))
depict_cnt(b.get_table("ipv4_count"))
depict_cnt(b.get_table("ipv6_count"), l3prot='ipv6')
# read events
else:
......
......@@ -114,7 +114,6 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
struct msghdr *msg, size_t size)
{
u16 family = sk->__sk_common.skc_family;
u64 *val, zero = 0;
if (family == AF_INET) {
u32 dst = sk->__sk_common.skc_daddr;
......@@ -176,9 +175,8 @@ def generate_bpf_subnets(subnets):
if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
(dst & __NET_MASK__)) {
struct index_key_t key = {.index = __POS__};
val = ipv4_send_bytes.lookup_or_init(&key, &zero);
ipv4_send_bytes.increment(key, size);
categorized = 1;
(*val) += size;
}
"""
bpf = ''
......
......@@ -104,7 +104,6 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
u32 pid = bpf_get_current_pid_tgid();
FILTER
u16 dport = 0, family = sk->__sk_common.skc_family;
u64 *val, zero = 0;
if (family == AF_INET) {
struct ipv4_key_t ipv4_key = {.pid = pid};
......@@ -113,8 +112,7 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
ipv4_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv4_key.dport = ntohs(dport);
val = ipv4_send_bytes.lookup_or_init(&ipv4_key, &zero);
(*val) += size;
ipv4_send_bytes.increment(ipv4_key, size);
} else if (family == AF_INET6) {
struct ipv6_key_t ipv6_key = {.pid = pid};
......@@ -126,8 +124,8 @@ int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
ipv6_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv6_key.dport = ntohs(dport);
val = ipv6_send_bytes.lookup_or_init(&ipv6_key, &zero);
(*val) += size;
ipv6_send_bytes.increment(ipv6_key, size);
}
// else drop
......@@ -157,8 +155,8 @@ int kprobe__tcp_cleanup_rbuf(struct pt_regs *ctx, struct sock *sk, int copied)
ipv4_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv4_key.dport = ntohs(dport);
val = ipv4_recv_bytes.lookup_or_init(&ipv4_key, &zero);
(*val) += copied;
ipv4_recv_bytes.increment(ipv4_key, copied);
} else if (family == AF_INET6) {
struct ipv6_key_t ipv6_key = {.pid = pid};
......@@ -169,8 +167,7 @@ int kprobe__tcp_cleanup_rbuf(struct pt_regs *ctx, struct sock *sk, int copied)
ipv6_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv6_key.dport = ntohs(dport);
val = ipv6_recv_bytes.lookup_or_init(&ipv6_key, &zero);
(*val) += copied;
ipv6_recv_bytes.increment(ipv6_key, copied);
}
// else drop
......
......@@ -27,10 +27,8 @@ BPF_HASH(counts, struct key_t, u64, 256);
int do_count(struct pt_regs *ctx) {
struct key_t key = {};
u64 zero = 0, *val;
key.ip = PT_REGS_IP(ctx);
val = counts.lookup_or_init(&key, &zero);
(*val)++;
counts.increment(key);
return 0;
}
""")
......
......@@ -134,14 +134,12 @@ int waker(struct pt_regs *ctx, struct task_struct *p) {
return 0;
struct key_t key = {};
u64 zero = 0, *val;
key.w_k_stack_id = stack_traces.get_stackid(ctx, BPF_F_REUSE_STACKID);
bpf_probe_read(&key.target, sizeof(key.target), p->comm);
bpf_get_current_comm(&key.waker, sizeof(key.waker));
val = counts.lookup_or_init(&key, &zero);
(*val) += delta;
counts.increment(key, delta);
return 0;
}
"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment