Commit 211a989b authored by 4ast, committed by GitHub

Merge pull request #836 from shodoco/lruhash

bcc: add lru_hash and lru_percpu_hash map types
parents 5647de0e 906af6fb
......@@ -85,6 +85,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
};
enum bpf_prog_type {
......@@ -106,6 +108,13 @@ enum bpf_prog_type {
#define BPF_EXIST 2 /* update existing element */
#define BPF_F_NO_PREALLOC (1U << 0)
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
#define BPF_F_NO_COMMON_LRU (1U << 1)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
......
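The hunk above updates bcc's copy of the kernel uapi header: it adds the two LRU map types and the BPF_F_NO_COMMON_LRU flag, which replaces the single shared LRU list with one list per CPU for better scalability. Below is a minimal, hypothetical sketch of what these values mean at the bpf(2) syscall level; it is not part of this commit, and it assumes an x86_64 machine (syscall number 321), a kernel recent enough to support LRU maps, and the privileges needed to create them.

import ctypes

# Constant values taken from the header hunk above.
BPF_MAP_CREATE = 0
BPF_MAP_TYPE_LRU_HASH = 9
BPF_F_NO_COMMON_LRU = 1 << 1
NR_BPF = 321  # x86_64-specific syscall number (assumption)

class MapCreateAttr(ctypes.Structure):
    # Layout of the anonymous struct used by the BPF_MAP_CREATE command.
    _fields_ = [("map_type", ctypes.c_uint32),
                ("key_size", ctypes.c_uint32),
                ("value_size", ctypes.c_uint32),
                ("max_entries", ctypes.c_uint32),
                ("map_flags", ctypes.c_uint32)]

libc = ctypes.CDLL(None, use_errno=True)
attr = MapCreateAttr(BPF_MAP_TYPE_LRU_HASH, 4, 8, 1024, BPF_F_NO_COMMON_LRU)
fd = libc.syscall(NR_BPF, BPF_MAP_CREATE, ctypes.byref(attr), ctypes.sizeof(attr))
print("map fd:", fd)  # negative when the kernel lacks LRU maps or capabilities are missing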
......@@ -86,6 +86,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
};
enum bpf_prog_type {
......@@ -96,6 +98,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
BPF_PROG_TYPE_PERF_EVENT,
};
#define BPF_PSEUDO_MAP_FD 1
......@@ -106,6 +109,13 @@ enum bpf_prog_type {
#define BPF_EXIST 2 /* update existing element */
#define BPF_F_NO_PREALLOC (1U << 0)
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
#define BPF_F_NO_COMMON_LRU (1U << 1)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
......
......@@ -639,6 +639,10 @@ bool BTypeVisitor::VisitVarDecl(VarDecl *Decl) {
map_type = BPF_MAP_TYPE_PERCPU_HASH;
} else if (A->getName() == "maps/percpu_array") {
map_type = BPF_MAP_TYPE_PERCPU_ARRAY;
} else if (A->getName() == "maps/lru_hash") {
map_type = BPF_MAP_TYPE_LRU_HASH;
} else if (A->getName() == "maps/lru_percpu_hash") {
map_type = BPF_MAP_TYPE_LRU_PERCPU_HASH;
} else if (A->getName() == "maps/histogram") {
if (table.key_desc == "\"int\"")
map_type = BPF_MAP_TYPE_ARRAY;
......
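The clang front-end change above is what wires the new map types into bcc's C macros: a table declared in the "maps/lru_hash" or "maps/lru_percpu_hash" section is now created as BPF_MAP_TYPE_LRU_HASH or BPF_MAP_TYPE_LRU_PERCPU_HASH. A short sketch of how this looks from a bcc program, mirroring the new test further down (the table and function names are made up for illustration):

from bcc import BPF

prog = r"""
// "lru_hash" resolves to BPF_MAP_TYPE_LRU_HASH through the front-end change
// above; when the table is full, the kernel evicts least recently used
// entries instead of rejecting the update.
BPF_TABLE("lru_hash", u32, u64, counts, 1024);

int count_clone(void *ctx) {
    u32 key = 0;
    u64 zero = 0, *val;

    val = counts.lookup_or_init(&key, &zero);
    (*val)++;
    return 0;
}
"""

b = BPF(text=prog)
b.attach_kprobe(event="sys_clone", fn_name="count_clone")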
......@@ -26,6 +26,9 @@ BaseTable.static.BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BaseTable.static.BPF_MAP_TYPE_PERCPU_HASH = 5
BaseTable.static.BPF_MAP_TYPE_PERCPU_ARRAY = 6
BaseTable.static.BPF_MAP_TYPE_STACK_TRACE = 7
BaseTable.static.BPF_MAP_TYPE_CGROUP_ARRAY = 8
BaseTable.static.BPF_MAP_TYPE_LRU_HASH = 9
BaseTable.static.BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
function BaseTable:initialize(t_type, bpf, map_id, map_fd, key_type, leaf_type)
assert(t_type == libbcc.bpf_table_type_id(bpf.module, map_id))
......
......@@ -138,9 +138,13 @@ else
S.c.BPF_MAP.PERCPU_ARRAY = 6
S.c.BPF_MAP.STACK_TRACE = 7
S.c.BPF_MAP.CGROUP_ARRAY = 8
S.c.BPF_MAP.LRU_HASH = 9
S.c.BPF_MAP.LRU_PERCPU_HASH = 10
end
if not S.c.BPF_PROG.TRACEPOINT then
S.c.BPF_PROG.TRACEPOINT = 5
S.c.BPF_PROG.XDP = 6
S.c.BPF_PROG.PERF_EVENT = 7
end
end
......
......@@ -28,6 +28,9 @@ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
stars_max = 40
......@@ -97,6 +100,10 @@ def Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs):
        t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    elif ttype == BPF_MAP_TYPE_STACK_TRACE:
        t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_HASH:
        t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
    elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
        t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
    if t == None:
        raise Exception("Unknown table type %d" % ttype)
    return t
......@@ -299,6 +306,9 @@ class HashTable(TableBase):
        if res < 0:
            raise KeyError

class LruHash(HashTable):
    def __init__(self, *args, **kwargs):
        super(LruHash, self).__init__(*args, **kwargs)

class ArrayBase(TableBase):
    def __init__(self, *args, **kwargs):
......@@ -526,6 +536,10 @@ class PerCpuHash(HashTable):
        result.value/=self.total_cpu
        return result

class LruPerCpuHash(PerCpuHash):
    def __init__(self, *args, **kwargs):
        super(LruPerCpuHash, self).__init__(*args, **kwargs)

class PerCpuArray(ArrayBase):
    def __init__(self, *args, **kwargs):
        self.reducer = kwargs.pop("reducer", None)
......
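On the Python side, LruHash and LruPerCpuHash are thin subclasses added above, so the user-space API is unchanged: the Table() factory simply resolves the new map types to them. Continuing the hypothetical sketch from the front-end section (counts is the made-up table declared there):

lru = b["counts"]           # resolved to the new LruHash class
for k, v in lru.items():    # iteration, len() and deletion behave like HashTable
    print(k.value, v.value)

# A "lru_percpu_hash" table resolves to LruPerCpuHash, which inherits the
# per-CPU reducers (sum/average/max) from PerCpuHash, e.g.:
#   stats = b["stats"]
#   total = stats.sum(stats.Key(0))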
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
import ctypes as ct
import os
import unittest
from bcc import BPF
import multiprocessing
class TestLru(unittest.TestCase):
    def test_lru_hash(self):
        b = BPF(text="""BPF_TABLE("lru_hash", int, u64, lru, 1024);""")
        t = b["lru"]
        for i in range(1, 1032):
            t[ct.c_int(i)] = ct.c_ulonglong(i)
        for i, v in t.items():
            self.assertEqual(v.value, i.value)
        # BPF_MAP_TYPE_LRU_HASH eviction happens in batch and we expect less
        # items than specified size.
        self.assertLess(len(t), 1024);

    def test_lru_percpu_hash(self):
        test_prog1 = """
        BPF_TABLE("lru_percpu_hash", u32, u32, stats, 1);
        int hello_world(void *ctx) {
            u32 key=0;
            u32 value = 0, *val;
            val = stats.lookup_or_init(&key, &value);
            *val += 1;
            return 0;
        }
        """
        b = BPF(text=test_prog1)
        stats_map = b.get_table("stats")
        b.attach_kprobe(event="sys_clone", fn_name="hello_world")
        ini = stats_map.Leaf()
        for i in range(0, multiprocessing.cpu_count()):
            ini[i] = 0
        # First initialize with key 1
        stats_map[ stats_map.Key(1) ] = ini
        # Then initialize with key 0
        stats_map[ stats_map.Key(0) ] = ini
        # Key 1 should have been evicted
        with self.assertRaises(KeyError):
            val = stats_map[ stats_map.Key(1) ]
        f = os.popen("hostname")
        f.close()
        self.assertEqual(len(stats_map),1)
        val = stats_map[ stats_map.Key(0) ]
        sum = stats_map.sum(stats_map.Key(0))
        avg = stats_map.average(stats_map.Key(0))
        max = stats_map.max(stats_map.Key(0))
        self.assertGreater(sum.value, 0L)
        self.assertGreater(max.value, 0L)
        b.detach_kprobe("sys_clone")

if __name__ == "__main__":
    unittest.main()
......@@ -20,7 +20,6 @@ class TestPercpu(unittest.TestCase):
            return 0;
        }
        """
        self.addCleanup(self.cleanup)
        bpf_code = BPF(text=test_prog1)
        stats_map = bpf_code.get_table("stats")
        bpf_code.attach_kprobe(event="sys_clone", fn_name="hello_world")
......@@ -37,6 +36,7 @@ class TestPercpu(unittest.TestCase):
        max = stats_map.max(stats_map.Key(0))
        self.assertGreater(sum.value, 0L)
        self.assertGreater(max.value, 0L)
        bpf_code.detach_kprobe("sys_clone")

    def test_u32(self):
        test_prog1 = """
......@@ -49,7 +49,6 @@ class TestPercpu(unittest.TestCase):
            return 0;
        }
        """
        self.addCleanup(self.cleanup)
        bpf_code = BPF(text=test_prog1)
        stats_map = bpf_code.get_table("stats")
        bpf_code.attach_kprobe(event="sys_clone", fn_name="hello_world")
......@@ -66,6 +65,7 @@ class TestPercpu(unittest.TestCase):
        max = stats_map.max(stats_map.Key(0))
        self.assertGreater(sum.value, 0L)
        self.assertGreater(max.value, 0L)
        bpf_code.detach_kprobe("sys_clone")

    def test_struct_custom_func(self):
        test_prog2 = """
......@@ -83,7 +83,6 @@ class TestPercpu(unittest.TestCase):
            return 0;
        }
        """
        self.addCleanup(self.cleanup)
        bpf_code = BPF(text=test_prog2)
        stats_map = bpf_code.get_table("stats",
                reducer=lambda x,y: stats_map.sLeaf(x.c1+y.c1))
......@@ -97,9 +96,7 @@ class TestPercpu(unittest.TestCase):
        self.assertEqual(len(stats_map),1)
        k = stats_map[ stats_map.Key(0) ]
        self.assertGreater(k.c1, 0L)

    def cleanup(self):
        BPF.detach_kprobe("sys_clone")
        bpf_code.detach_kprobe("sys_clone")

if __name__ == "__main__":
......