Commit 63b6e44e authored by Teng Qin's avatar Teng Qin

Improve clear table API in C++

parent 5d23500a
......@@ -142,6 +142,45 @@ StatusTuple BPFTable::remove_value(const std::string& key_str) {
return StatusTuple(0);
}
StatusTuple BPFTable::clear_table_non_atomic() {
if (desc.type == BPF_MAP_TYPE_HASH ||
desc.type == BPF_MAP_TYPE_PERCPU_HASH ||
desc.type == BPF_MAP_TYPE_LRU_HASH ||
desc.type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
desc.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
// For hash maps, use the first() interface (which uses get_next_key) to
// iterate through the map and clear elements
auto key = std::unique_ptr<void, decltype(::free)*>(
::malloc(desc.key_size),
::free);
while (this->first(key.get()))
if (!this->remove(key.get())) {
return StatusTuple(
-1,
"Failed to delete element when clearing table %s",
desc.name.c_str());
}
} else if (desc.type == BPF_MAP_TYPE_ARRAY ||
desc.type == BPF_MAP_TYPE_PERCPU_ARRAY) {
return StatusTuple(
-1, "Array map %s do not support clearing elements", desc.name.c_str());
} else if (desc.type == BPF_MAP_TYPE_PROG_ARRAY ||
desc.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
desc.type == BPF_MAP_TYPE_STACK_TRACE ||
desc.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
// For Stack-trace and FD arrays, just iterate over all indices
for (size_t i = 0; i < desc.max_entries; i++) {
this->remove(&i);
}
} else {
return StatusTuple(
-1, "Clearing for map type of %s not supported yet", desc.name.c_str());
}
return StatusTuple(0);
}
size_t BPFTable::get_possible_cpu_count() {
return get_possible_cpus().size();
}
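For orientation, below is a minimal, hypothetical usage sketch of the untyped clear API added in this hunk. The program text, the table name "counts", and the header path are illustrative assumptions and are not part of this commit.

// Hypothetical sketch (not from this commit): clearing an untyped table
// through the bcc C++ front end. Assumes the program declares a hash map
// named "counts".
#include <iostream>
#include <string>
#include "BPF.h"

const std::string BPF_PROGRAM = R"(
  BPF_HASH(counts, u64, u64);
)";

int main() {
  ebpf::BPF bpf;
  auto init_res = bpf.init(BPF_PROGRAM);
  if (init_res.code() != 0) {
    std::cerr << init_res.msg() << std::endl;
    return 1;
  }

  // get_table() hands back the untyped BPFTable wrapper shown above.
  ebpf::BPFTable counts = bpf.get_table("counts");

  // Non-atomic: keys are walked with get_next_key and deleted one at a time,
  // so entries inserted concurrently by the BPF program may survive the clear.
  auto clear_res = counts.clear_table_non_atomic();
  if (clear_res.code() != 0)
    std::cerr << clear_res.msg() << std::endl;
  return 0;
}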
......@@ -157,8 +196,8 @@ BPFStackTable::BPFStackTable(const TableDesc& desc,
};
}
BPFStackTable::BPFStackTable(BPFStackTable&& that) :
BPFTableBase<int, stacktrace_t>(that.desc),
BPFStackTable::BPFStackTable(BPFStackTable&& that)
: BPFTableBase<int, stacktrace_t>(that.desc),
symbol_option_(std::move(that.symbol_option_)),
pid_sym_(std::move(that.pid_sym_)) {
that.pid_sym_.clear();
......@@ -214,8 +253,8 @@ std::vector<std::string> BPFStackTable::get_stack_symbol(int stack_id,
}
StatusTuple BPFPerfBuffer::open_on_cpu(perf_reader_raw_cb cb,
perf_reader_lost_cb lost_cb,
int cpu, void* cb_cookie, int page_cnt) {
perf_reader_lost_cb lost_cb, int cpu,
void* cb_cookie, int page_cnt) {
if (cpu_readers_.find(cpu) != cpu_readers_.end())
return StatusTuple(-1, "Perf buffer already open on CPU %d", cpu);
......
......@@ -105,6 +105,8 @@ class BPFTable : public BPFTableBase<void, void> {
StatusTuple remove_value(const std::string& key_str);
StatusTuple clear_table_non_atomic();
static size_t get_possible_cpu_count();
};
......@@ -237,14 +239,8 @@ class BPFHashTable : public BPFTableBase<KeyType, ValueType> {
StatusTuple clear_table_non_atomic() {
KeyType cur;
if (!this->first(&cur))
return StatusTuple(0);
while (true) {
while (this->first(&cur))
TRY2(remove_value(cur));
if (!this->next(&cur, &cur))
break;
}
return StatusTuple(0);
}
......
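Continuing the sketch above, a rough illustration of the typed template API after this change: clear_table_non_atomic() now repeatedly takes the first remaining key and removes it until the map is empty, instead of interleaving removal with next()-based iteration. The table name and key/value types here are assumptions.

// Hypothetical continuation of the earlier sketch; assumes the program also
// declares BPF_HASH(myhash, u32, u64) and that `bpf` was initialized above.
ebpf::BPFHashTable<uint32_t, uint64_t> table =
    bpf.get_hash_table<uint32_t, uint64_t>("myhash");

table.update_value(1, 100);
table.update_value(2, 200);

// Internally: call first() and remove_value() until no key is left.
auto res = table.clear_table_non_atomic();
if (res.code() != 0)
  std::cerr << res.msg() << std::endl;

// The offline dump should now be empty.
if (!table.get_table_offline().empty())
  std::cerr << "table not empty after clear" << std::endl;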
......@@ -54,6 +54,16 @@ TEST_CASE("test bpf table", "[bpf_table]") {
res = t.get_value("0x11", value);
REQUIRE(res.code() != 0);
// clear table
res = t.update_value("0x15", "0x888");
REQUIRE(res.code() == 0);
auto elements = bpf->get_hash_table<int, int>("myhash").get_table_offline();
REQUIRE(elements.size() == 2);
res = t.clear_table_non_atomic();
REQUIRE(res.code() == 0);
elements = bpf->get_hash_table<int, int>("myhash").get_table_offline();
REQUIRE(elements.size() == 0);
// delete bpf_module, call to key/leaf printf/scanf must fail
delete bpf;
......
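The test hunk above plugs into an existing TEST_CASE whose setup is elided by the diff. A plausible reconstruction of that surrounding setup, offered only to make the added lines readable (the actual wording lives in tests/cc/test_bpf_table.cc and is not shown here), would look roughly like this:

// Hypothetical reconstruction of the setup the added lines rely on;
// not part of this hunk.
const std::string BPF_PROGRAM = R"(
  BPF_TABLE("hash", int, int, myhash, 128);
)";

ebpf::BPF* bpf(new ebpf::BPF);
ebpf::StatusTuple res(0);
res = bpf->init(BPF_PROGRAM);
REQUIRE(res.code() == 0);

// `t` is the untyped wrapper whose string-based get/update/clear calls
// the added assertions exercise.
ebpf::BPFTable t = bpf->get_table("myhash");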