Commit 7c1c804c authored by 4ast's avatar 4ast Committed by GitHub

Merge pull request #1488 from palmtenor/add_helper

Update for new bpf_perf_{event|prog}_read_value helpers
parents 120ac968 35771cf9
......@@ -159,3 +159,5 @@ Helper | Kernel version | Commit
`BPF_FUNC_tail_call()` | 4.2 | [04fd61ab36ec](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=04fd61ab36ec065e194ab5e74ae34a5240d992bb)
`BPF_FUNC_trace_printk()` | 4.1 | [9c959c863f82](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=9c959c863f8217a2ff3d7c296e8223654d240569)
`BPF_FUNC_xdp_adjust_head()` | 4.10 | [17bedab27231](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=17bedab2723145d17b14084430743549e6943d03)
`BPF_FUNC_perf_event_read_value()` | 4.15 | [908432ca84fc](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=908432ca84fc229e906ba164219e9ad0fe56f755)
`BPF_FUNC_perf_prog_read_value()` | 4.15 | [4bebdc7a85aa](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=4bebdc7a85aa400c0222b5329861e4ad9252f1e5)
......@@ -15,7 +15,6 @@
*/
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
......@@ -408,8 +407,6 @@ StatusTuple BPF::open_perf_event(const std::string& name, uint32_t type,
name.c_str());
perf_event_arrays_[name] = new BPFPerfEventArray(it->second);
}
if (type != PERF_TYPE_RAW && type != PERF_TYPE_HARDWARE)
return StatusTuple(-1, "open_perf_event unsupported type");
auto table = perf_event_arrays_[name];
TRY2(table->open_all_cpu(type, config));
return StatusTuple(0);
......
......@@ -107,7 +107,7 @@ BPFStackTable::~BPFStackTable() {
}
void BPFStackTable::clear_table_non_atomic() {
for (int i = 0; i < capacity(); i++) {
for (int i = 0; size_t(i) < capacity(); i++) {
remove(&i);
}
}
......
......@@ -96,6 +96,7 @@ struct _name##_table_t { \
u32 leaf; \
/* counter = map.perf_read(index) */ \
u64 (*perf_read) (int); \
int (*perf_counter_value) (int, void *, u32); \
u32 max_entries; \
}; \
__attribute__((section("maps/perf_array"))) \
......@@ -245,6 +246,10 @@ static int (*bpf_perf_event_output)(void *ctx, void *map, u64 index, void *data,
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
(void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_perf_event_read_value)(void *map, u64 flags, void *buf, u32 buf_size) =
(void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, u32 buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
/* bcc_get_stackid will return a negative value in the case of an error
*
......
......@@ -433,6 +433,9 @@ bool BTypeVisitor::VisitCallExpr(CallExpr *Call) {
} else if (memb_name == "perf_read") {
prefix = "bpf_perf_event_read";
suffix = ")";
} else if (memb_name == "perf_counter_value") {
prefix = "bpf_perf_event_read_value";
suffix = ")";
} else {
error(Call->getLocStart(), "invalid bpf_table operation %0") << memb_name;
return false;
......
......@@ -1061,7 +1061,8 @@ int bpf_attach_perf_event(int progfd, uint32_t ev_type, uint32_t ev_config,
struct perf_event_attr attr = {};
attr.type = ev_type;
attr.config = ev_config;
attr.inherit = 1;
if (pid > 0)
attr.inherit = 1;
if (sample_freq > 0) {
attr.freq = 1;
attr.sample_freq = sample_freq;
......
......@@ -16,6 +16,7 @@ add_executable(test_libbcc
test_array_table.cc
test_bpf_table.cc
test_hash_table.cc
test_perf_event.cc
test_usdt_args.cc
test_usdt_probes.cc
utils.cc)
......
......@@ -128,6 +128,8 @@ TEST_CASE("test bpf stack table", "[bpf_stack_table]") {
int zero = 0, *val;
val = id.lookup_or_init(&zero, &stack_id);
(*val) = stack_id;
return 0;
}
)";
......
/*
* Copyright (c) 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <linux/perf_event.h>
#include <linux/version.h>
#include <unistd.h>
#include <string>
#include "BPF.h"
#include "catch.hpp"
TEST_CASE("test read perf event", "[bpf_perf_event]") {
// The basic bpf_perf_event_read is supported since Kernel 4.3. However in that
// version it only supported HARDWARE and RAW events. On the other hand, our
// tests running on Jenkins won't have available HARDWARE counters since they
// are running on VMs. The support of other types of events such as SOFTWARE
// was only added in Kernel 4.13, hence we can only run the test from that
// version onwards.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
const std::string BPF_PROGRAM = R"(
BPF_PERF_ARRAY(cnt, NUM_CPUS);
BPF_HASH(val, int, u64, 1);
BPF_HASH(ret, int, int, 1);
BPF_HASH(counter, int, struct bpf_perf_event_value, 1);
int on_sys_getuid(void *ctx) {
int zero = 0;
u64 v = cnt.perf_read(CUR_CPU_IDENTIFIER);
if (((s64)v < 0) && ((s64)v > -256))
return 0;
val.update(&zero, &v);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
u32 cpu = bpf_get_smp_processor_id();
struct bpf_perf_event_value c = {0};
int r = cnt.perf_counter_value(cpu, &c, sizeof(c));
ret.update(&zero, &r);
counter.update(&zero, &c);
#endif
return 0;
}
)";
ebpf::BPF bpf;
ebpf::StatusTuple res(0);
// Compile the program, defining NUM_CPUS as the number of online CPUs so the
// perf array has one slot per CPU.
res = bpf.init(
BPF_PROGRAM,
{"-DNUM_CPUS=" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN))}, {});
REQUIRE(res.code() == 0);
// Open a software CPU-clock counter on every CPU, then attach the program to
// the sys_getuid kprobe so that calling getuid() below triggers it.
res =
bpf.open_perf_event("cnt", PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK);
REQUIRE(res.code() == 0);
res = bpf.attach_kprobe("sys_getuid", "on_sys_getuid");
REQUIRE(res.code() == 0);
// Trigger the kprobe; getuid() cannot fail, the REQUIRE just keeps the call
// from being optimized away.
REQUIRE(getuid() >= 0);
res = bpf.detach_kprobe("sys_getuid");
REQUIRE(res.code() == 0);
res = bpf.close_perf_event("cnt");
REQUIRE(res.code() == 0);
// The program stored the raw perf_read() counter value under key 0.
auto val = bpf.get_hash_table<int, uint64_t>("val");
REQUIRE(val[0] >= 0);
#endif
// perf_counter_value (bpf_perf_event_read_value) only exists since Kernel
// 4.15; verify the helper returned 0 and a plausible value/enabled/running
// triple. Note a 4.13..4.15 kernel skips this whole section, so the use of
// `bpf` from the block above is safe.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
auto counter_table =
bpf.get_hash_table<int, struct bpf_perf_event_value>("counter");
auto counter = counter_table[0];
auto ret = bpf.get_hash_table<int, int>("ret");
REQUIRE(ret[0] == 0);
REQUIRE(counter.counter >= 0);
// The counter was enabled for the whole run, so enabled time must be
// positive and running time can never exceed it.
REQUIRE(counter.enabled > 0);
REQUIRE(counter.running >= 0);
REQUIRE(counter.running <= counter.enabled);
#endif
}
TEST_CASE("test attach perf event", "[bpf_perf_event]") {
// Attaching a BPF program to a perf event (BPF_PROG_TYPE_PERF_EVENT) is
// supported since Kernel 4.9.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
const std::string BPF_PROGRAM = R"(
BPF_HASH(pid, int, u64, 1);
BPF_HASH(ret, int, int, 1);
BPF_HASH(counter, int, struct bpf_perf_event_value, 1);
int on_event(void *ctx) {
int zero = 0;
u64 p = bpf_get_current_pid_tgid();
pid.update(&zero, &p);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
struct bpf_perf_event_value c = {0};
int r = bpf_perf_prog_read_value(ctx, &c, sizeof(c));
ret.update(&zero, &r);
counter.update(&zero, &c);
#endif
return 0;
}
)";
ebpf::BPF bpf;
ebpf::StatusTuple res(0);
res = bpf.init(BPF_PROGRAM);
REQUIRE(res.code() == 0);
// Sample the software CPU-clock event at 1000 Hz on all CPUs/processes and
// run on_event on every sample; one second of sleep guarantees samples fire.
res = bpf.attach_perf_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK,
"on_event", 0, 1000);
REQUIRE(res.code() == 0);
sleep(1);
res = bpf.detach_perf_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK);
REQUIRE(res.code() == 0);
// The program recorded the pid/tgid of whatever task was sampled last.
auto pid = bpf.get_hash_table<int, uint64_t>("pid");
REQUIRE(pid[0] >= 0);
#endif
// bpf_perf_prog_read_value only exists since Kernel 4.15; verify it
// succeeded and reported sane timings. A 4.9..4.15 kernel skips this whole
// section, so the use of `bpf` from the block above is safe.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
auto counter_table =
bpf.get_hash_table<int, struct bpf_perf_event_value>("counter");
auto counter = counter_table[0];
auto ret = bpf.get_hash_table<int, int>("ret");
REQUIRE(ret[0] == 0);
REQUIRE(counter.counter >= 0);
// The event ran for the full 1s sleep, so it was enabled for at least
// 1e9 ns, and running time can never exceed enabled time.
REQUIRE(counter.enabled >= 1000000000);
REQUIRE(counter.running >= 0);
REQUIRE(counter.running <= counter.enabled);
#endif
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment