Commit c848e557 authored by 4ast

Merge pull request #15 from plumgrid/clang_b_lang

Add clang support to bcc
parents 691692ec 7009b559
......@@ -22,6 +22,7 @@ find_library(libclangEdit NAMES clangEdit HINTS ${CLANG_SEARCH})
find_library(libclangFrontend NAMES clangFrontend HINTS ${CLANG_SEARCH})
find_library(libclangLex NAMES clangLex HINTS ${CLANG_SEARCH})
find_library(libclangParse NAMES clangParse HINTS ${CLANG_SEARCH})
find_library(libclangRewrite NAMES clangRewrite HINTS ${CLANG_SEARCH})
find_library(libclangSema NAMES clangSema HINTS ${CLANG_SEARCH})
find_library(libclangSerialization NAMES clangSerialization HINTS ${CLANG_SEARCH})
......
......@@ -13,6 +13,8 @@ lib.bpf_program_size.restype = ct.c_size_t
lib.bpf_program_size.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_program_license.restype = ct.c_char_p
lib.bpf_program_license.argtypes = [ct.c_void_p]
lib.bpf_program_kern_version.restype = ct.c_uint
lib.bpf_program_kern_version.argtypes = [ct.c_void_p]
lib.bpf_program_table_fd.restype = ct.c_int
lib.bpf_program_table_fd.argtypes = [ct.c_void_p, ct.c_char_p]
......@@ -33,7 +35,7 @@ lib.bpf_attach_filter.restype = ct.c_int
lib.bpf_attach_filter.argtypes = [ct.c_int, ct.c_char_p, ct.c_uint, ct.c_ubyte, ct.c_uint]
lib.bpf_prog_load.restype = ct.c_int
lib.bpf_prog_load.argtypes = [ct.c_int, ct.c_void_p, ct.c_size_t,
ct.c_char_p]
ct.c_char_p, ct.c_uint]
lib.bpf_attach_kprobe.restype = ct.c_int
lib.bpf_attach_kprobe.argtypes = [ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int, ct.c_int]
......@@ -67,7 +69,8 @@ class BPF(object):
self.fd[prog_name] = lib.bpf_prog_load(self.prog_type,
lib.bpf_program_start(self.prog, prog_name.encode("ascii")),
lib.bpf_program_size(self.prog, prog_name.encode("ascii")),
lib.bpf_program_license(self.prog))
lib.bpf_program_license(self.prog),
lib.bpf_program_kern_version(self.prog))
if self.fd[prog_name] < 0:
print((ct.c_char * 65536).in_dll(lib, "bpf_log_buf").value)
......
......@@ -10,16 +10,17 @@ ADD_FLEX_BISON_DEPENDENCY(Lexer Parser)
set(CMAKE_SHARED_LINKER_FLAGS "-static-libstdc++ -Wl,--exclude-libs=ALL")
add_library(bpfprog SHARED bpf_common.cc bpf_program.cc codegen_llvm.cc
node.cc parser.cc printer.cc type_check.cc libbpf.c
node.cc parser.cc printer.cc type_check.cc libbpf.c b_frontend_action.cc
kbuild_helper.cc
${BISON_Parser_OUTPUTS} ${FLEX_Lexer_OUTPUTS})
# BPF is still experimental otherwise it should be available
#llvm_map_components_to_libnames(llvm_libs bpf mcjit irreader passes)
llvm_map_components_to_libnames(llvm_libs mcjit irreader passes linker instrumentation objcarcopts bitwriter option)
# order is important
set(clang_libs ${libclangFrontend} ${libclangParse} ${libclangSema} ${libclangCodeGen}
${libclangDriver} ${libclangAnalysis} ${libclangSerialization} ${libclangEdit}
${libclangLex} ${libclangAST} ${libclangBasic})
set(clang_libs ${libclangFrontend} ${libclangSerialization} ${libclangDriver} ${libclangParse}
${libclangSema} ${libclangCodeGen} ${libclangAnalysis} ${libclangRewrite} ${libclangEdit}
${libclangAST} ${libclangLex} ${libclangBasic})
# Link against LLVM libraries
target_link_libraries(bpfprog ${clang_libs} ${llvm_libs} LLVMBPFCodeGen mnl)
#include <linux/bpf.h>
#include <clang/AST/ASTConsumer.h>
#include <clang/AST/ASTContext.h>
#include <clang/AST/RecordLayout.h>
#include <clang/Frontend/CompilerInstance.h>
#include <clang/Rewrite/Core/Rewriter.h>
#include "b_frontend_action.h"
extern "C"
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int max_entries);
namespace ebpf {
using std::map;
using std::string;
using std::unique_ptr;
using namespace clang;
// Visitor ctor: keeps references to the AST context, the shared Rewriter that
// accumulates the B-to-C source edits, and the table-name -> BPFTable map
// that is populated as map declarations are visited.
BTypeVisitor::BTypeVisitor(ASTContext &C, Rewriter &rewriter, map<string, BPFTable> &tables)
: C(C), rewriter_(rewriter), out_(llvm::errs()), tables_(tables) {
}
// No function-level rewriting is performed; kept as a traversal hook.
bool BTypeVisitor::VisitFunctionDecl(FunctionDecl *D) {
return true;
}
// convert calls of the type:
//  table.foo(&key)
// to:
//  bpf_table_foo_elem(bpf_pseudo_fd(table), &key [,&leaf])
// Returns false (aborting the traversal) if the table handle is unknown or
// the method name is not one of get/put/delete.
bool BTypeVisitor::VisitCallExpr(CallExpr *Call) {
// make sure node is a reference to a bpf table, which is assured by the
// presence of the section("maps/<typename>") GNU __attribute__
if (MemberExpr *Memb = dyn_cast<MemberExpr>(Call->getCallee()->IgnoreImplicit())) {
StringRef memb_name = Memb->getMemberDecl()->getName();
if (DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(Memb->getBase())) {
if (SectionAttr *A = Ref->getDecl()->getAttr<SectionAttr>()) {
if (!A->getName().startswith("maps"))
return true;
// find the table fd, which was opened at declaration time
auto table_it = tables_.find(Ref->getDecl()->getName());
if (table_it == tables_.end()) {
C.getDiagnostics().Report(Ref->getLocEnd(), diag::err_expected)
<< "initialized handle for bpf_table";
return false;
}
string fd = std::to_string(table_it->second.fd);
string prefix, suffix;
string map_update_policy = "BPF_ANY";
// map the B method name onto the matching bpf helper wrapper; put() always
// uses the BPF_ANY update policy
if (memb_name == "get") {
prefix = "bpf_map_lookup_elem_";
suffix = ")";
} else if (memb_name == "put") {
prefix = "bpf_map_update_elem_";
suffix = ", " + map_update_policy + ")";
} else if (memb_name == "delete") {
prefix = "bpf_map_delete_elem_";
suffix = ")";
} else {
llvm::errs() << "error: unknown bpf_table operation " << memb_name << "\n";
return false;
}
// the table fd is passed via bpf_pseudo_fd (declared asm("llvm.bpf.pseudo"))
prefix += "(bpf_pseudo_fd(1, " + fd + "), ";
// splice the original argument text, unmodified, between prefix and suffix
SourceRange argRange(Call->getArg(0)->getLocStart(),
Call->getArg(Call->getNumArgs()-1)->getLocEnd());
string args = rewriter_.getRewrittenText(argRange);
rewriter_.ReplaceText(SourceRange(Call->getLocStart(), Call->getLocEnd()), prefix + args + suffix);
return true;
}
}
}
return true;
}
// look for table subscript references, and turn them into auto table entries:
//  table.data[key]
// becomes:
//  struct Key key = {123};
//  struct Leaf *leaf = table.get(&key);
//  if (!leaf) {
//    struct Leaf zleaf = {0};
//    table.put(&key, &zleaf, BPF_NOEXIST);
//    leaf = table.get(&key);
//    if (!leaf) return -1;
//  }
bool BTypeVisitor::VisitArraySubscriptExpr(ArraySubscriptExpr *Arr) {
Expr *LHS = Arr->getLHS()->IgnoreImplicit();
Expr *RHS = Arr->getRHS()->IgnoreImplicit();
if (MemberExpr *Memb = dyn_cast<MemberExpr>(LHS)) {
if (DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(Memb->getBase())) {
// only rewrite subscripts on declarations carrying a "maps*" section attr
if (SectionAttr *A = Ref->getDecl()->getAttr<SectionAttr>()) {
if (A->getName().startswith("maps")) {
auto table_it = tables_.find(Ref->getDecl()->getName());
if (table_it == tables_.end()) {
C.getDiagnostics().Report(Ref->getLocEnd(), diag::err_expected)
<< "initialized handle for bpf_table";
return false;
}
string fd = std::to_string(table_it->second.fd);
string map_update_policy = "BPF_NOEXIST";
string name = Ref->getDecl()->getName();
// reuse the original subscript text as the key argument of the lookup
SourceRange argRange(RHS->getLocStart(), RHS->getLocEnd());
string args = rewriter_.getRewrittenText(argRange);
string lookup = "bpf_map_lookup_elem_(bpf_pseudo_fd(1, " + fd + ")";
string update = "bpf_map_update_elem_(bpf_pseudo_fd(1, " + fd + ")";
// emit a GNU statement-expression: look up the key, insert a
// zero-initialized leaf on a miss (BPF_NOEXIST), and yield *leaf
string txt = "(*({typeof(" + name + ".leaf) *leaf = " + lookup + ", " + args + "); ";
txt += "if (!leaf) {";
txt += " typeof(" + name + ".leaf) zleaf = {0};";
txt += " " + update + ", " + args + ", &zleaf, " + map_update_policy + ");";
txt += " leaf = " + lookup + ", " + args + ");";
txt += " if (!leaf) return -1;";
txt += "}";
txt += "leaf;}))";
rewriter_.ReplaceText(SourceRange(Arr->getLocStart(), Arr->getLocEnd()), txt);
}
}
}
}
return true;
}
// Open table FDs when bpf tables (as denoted by section("maps*") attribute)
// are declared.
// The struct layout supplies the map geometry: sizeof(key), sizeof(leaf) and
// sizeof(data)/sizeof(leaf) entries. Returns false on a malformed table.
bool BTypeVisitor::VisitVarDecl(VarDecl *Decl) {
  const RecordType *R = Decl->getType()->getAs<RecordType>();
  if (SectionAttr *A = Decl->getAttr<SectionAttr>()) {
    if (!A->getName().startswith("maps"))
      return true;
    if (!R) {
      C.getDiagnostics().Report(Decl->getLocEnd(), diag::err_expected)
          << "struct type for bpf_table";
      return false;
    }
    const RecordDecl *RD = R->getDecl()->getDefinition();
    // Zero-initialize: BPFTable has no default member initializers, so a
    // declaration missing one of the expected fields must not feed garbage
    // sizes into bpf_create_map.
    BPFTable table = {};
    for (auto F : RD->fields()) {
      size_t sz = C.getTypeSize(F->getType()) >> 3;  // bits -> bytes
      if (F->getName() == "key") {
        table.key_size = sz;
      } else if (F->getName() == "leaf") {
        table.leaf_size = sz;
      } else if (F->getName() == "data") {
        // "leaf" must precede "data"; guard the division so a missing or
        // misordered leaf field reports an error instead of dividing by zero.
        if (table.leaf_size == 0) {
          C.getDiagnostics().Report(F->getLocEnd(), diag::err_expected)
              << "leaf field before data field in bpf_table";
          return false;
        }
        table.max_entries = sz / table.leaf_size;
      }
    }
    bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
    if (A->getName() == "maps/hash")
      map_type = BPF_MAP_TYPE_HASH;
    else if (A->getName() == "maps/array")
      map_type = BPF_MAP_TYPE_ARRAY;
    table.fd = bpf_create_map(map_type, table.key_size, table.leaf_size, table.max_entries);
    if (table.fd < 0) {
      llvm::errs() << "error: could not open bpf fd\n";
      return false;
    }
    tables_[Decl->getName()] = table;
  }
  return true;
}
// Currently a no-op; placeholder for attribute-driven handling of table
// references (see the commented-out sketch below).
bool BTypeVisitor::VisitDeclRefExpr(DeclRefExpr *E) {
//ValueDecl *D = E->getDecl();
//BPFTableAttr *A = D->getAttr<BPFTableAttr>();
return true;
}
// Constructs the embedded visitor with the shared context/rewriter/table map.
BTypeConsumer::BTypeConsumer(ASTContext &C, Rewriter &rewriter, map<string, BPFTable> &tables)
: visitor_(C, rewriter, tables) {
}
// Run the B-type visitor over every declaration in the group.
bool BTypeConsumer::HandleTopLevelDecl(DeclGroupRef D) {
  for (auto it = D.begin(), end = D.end(); it != end; ++it)
    visitor_.TraverseDecl(*it);
  return true;
}
// The action owns the Rewriter and the table map; ownership of the table map
// can later be transferred to the caller via take_tables().
BFrontendAction::BFrontendAction(llvm::raw_ostream &os)
: rewriter_(new Rewriter), os_(os), tables_(new map<string, BPFTable>) {
}
// Called by clang once the AST is complete: write the rewritten (translated)
// source for the main file to the output stream and flush it.
void BFrontendAction::EndSourceFileAction() {
// uncomment to see rewritten source
//rewriter_->getEditBuffer(rewriter_->getSourceMgr().getMainFileID()).write(llvm::errs());
rewriter_->getEditBuffer(rewriter_->getSourceMgr().getMainFileID()).write(os_);
os_.flush();
}
// Attach the rewriter to this compile's SourceManager and hand back the
// consumer that performs the B-to-C translation.
unique_ptr<ASTConsumer> BFrontendAction::CreateASTConsumer(CompilerInstance &Compiler, llvm::StringRef InFile) {
rewriter_->setSourceMgr(Compiler.getSourceManager(), Compiler.getLangOpts());
return unique_ptr<ASTConsumer>(new BTypeConsumer(Compiler.getASTContext(), *rewriter_, *tables_));
}
}
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <clang/AST/RecursiveASTVisitor.h>
#include <clang/Frontend/FrontendAction.h>
#include <clang/Rewrite/Core/Rewriter.h>
namespace clang {
class ASTConsumer;
class ASTContext;
class CompilerInstance;
}
namespace llvm {
class raw_ostream;
class StringRef;
}
namespace ebpf {
// Descriptor for one opened BPF map: the fd returned by bpf_create_map plus
// the key/leaf sizes in bytes and the entry capacity.
// NOTE(review): there are no default member initializers -- users must fill
// in (or zero-initialize) every field before use.
struct BPFTable {
int fd;
size_t key_size;
size_t leaf_size;
size_t max_entries;
};
// Type visitor and rewriter for B programs.
// It will look for B-specific features and rewrite them into a valid
// C program. As part of the processing, open the necessary BPF tables
// and store the open handles in a map of table-to-fd's.
class BTypeVisitor : public clang::RecursiveASTVisitor<BTypeVisitor> {
public:
explicit BTypeVisitor(clang::ASTContext &C, clang::Rewriter &rewriter,
std::map<std::string, BPFTable> &tables);
// Visit* callbacks return false to abort the traversal on error.
bool VisitFunctionDecl(clang::FunctionDecl *D);
bool VisitCallExpr(clang::CallExpr *Call);
bool VisitVarDecl(clang::VarDecl *Decl);
bool VisitArraySubscriptExpr(clang::ArraySubscriptExpr *E);
bool VisitDeclRefExpr(clang::DeclRefExpr *E);
private:
clang::ASTContext &C;
clang::Rewriter &rewriter_; /// modifications to the source go into this class
llvm::raw_ostream &out_; /// for debugging
std::map<std::string, BPFTable> &tables_; /// store the open FDs
};
// A helper class to the frontend action, walks the decls
class BTypeConsumer : public clang::ASTConsumer {
public:
explicit BTypeConsumer(clang::ASTContext &C, clang::Rewriter &rewriter,
std::map<std::string, BPFTable> &tables);
// Runs BTypeVisitor over every declaration in the group.
bool HandleTopLevelDecl(clang::DeclGroupRef D) override;
private:
BTypeVisitor visitor_;
};
// Create a B program in 2 phases (everything else is normal C frontend):
// 1. Catch the map declarations and open the fd's
// 2. Capture the IR
class BFrontendAction : public clang::ASTFrontendAction {
 public:
  // Initialize with the output stream where the new source file contents
  // should be written.
  explicit BFrontendAction(llvm::raw_ostream &os);

  // Called by clang when the AST has been completed, here the output stream
  // will be flushed.
  void EndSourceFileAction() override;

  std::unique_ptr<clang::ASTConsumer>
  CreateASTConsumer(clang::CompilerInstance &Compiler, llvm::StringRef InFile) override;

  // take ownership of the table-to-fd mapping data structure
  // (qualified std::move; the previous unqualified `move` only resolved
  // through argument-dependent lookup)
  std::unique_ptr<std::map<std::string, BPFTable>> take_tables() { return std::move(tables_); }

 private:
  std::unique_ptr<clang::Rewriter> rewriter_;
  llvm::raw_ostream &os_;
  std::unique_ptr<std::map<std::string, BPFTable>> tables_;
};
} // namespace ebpf
/*
* ====================================================================
* Copyright (c) 2012-2013, PLUMgrid, http://plumgrid.com
*
* This source is subject to the PLUMgrid License.
* All rights reserved.
*
* THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF
* ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
* PARTICULAR PURPOSE.
*
* PLUMgrid confidential information, delete if you are not the
* intended recipient.
*
* ====================================================================
*/
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#define assert(v)
/* Byte-order helpers. The bpf_ntoh* family converts network -> host order;
 * the bpf_hton* family is defined in terms of it since byte swapping is its
 * own inverse. */
static inline u16 bpf_ntohs(u16 val) {
/* will be recognized by gcc into rotate insn and eventually rolw 8 */
return (val << 8) | (val >> 8);
}
static inline u32 bpf_ntohl(u32 val) {
/* gcc will use bswapsi2 insn */
return __builtin_bswap32(val);
}
static inline u64 bpf_ntohll(u64 val) {
/* gcc will use bswapdi2 insn */
return __builtin_bswap64(val);
}
/* 128-bit swap: swap each 64-bit half, then exchange the halves. */
static inline unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}
static inline u16 bpf_htons(u16 val) {
return bpf_ntohs(val);
}
static inline u32 bpf_htonl(u32 val) {
return bpf_ntohl(val);
}
static inline u64 bpf_htonll(u64 val) {
return bpf_ntohll(val);
}
static inline unsigned __int128 bpf_hton128(unsigned __int128 val) {
return bpf_ntoh128(val);
}
/* Load a 64-bit quantity from the packet as two 32-bit loads; load_word
 * already converts each half to host order, so the word at `off` forms the
 * high 32 bits. */
static inline u64 load_dword(void *skb, u64 off) {
  /* bug fix: the high word must be shifted by 32 bits, not 4 */
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}
/* Store intrinsics provided by the BPF LLVM backend via named asm symbols. */
void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
/* Store a 64-bit value as two 32-bit stores (low half first). */
static inline void bpf_store_dword(void *skb, u64 off, u64 val) {
bpf_store_word(skb, off, (u32)val);
bpf_store_word(skb, off + 4, val >> 32);
}
/* MASK(n): the lowest n bits set (n <= 64); MASK128 is the 128-bit variant. */
#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))
struct _skbuff;
struct bpf_context;
/* Extract a bsz-bit field located bofs bits past the start of the byte at
 * offset off, right-aligned in the return value. Selects the narrowest
 * load (byte/half/word/dword) that covers the field; fields wider than 64
 * bits (or straddling past 64) hit assert(0). */
//static inline __attribute__((always_inline))
SEC("helpers")
u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
if (bofs == 0 && bsz == 8) {
return load_byte(pkt, off);
} else if (bofs + bsz <= 8) {
return load_byte(pkt, off) >> (8 - (bofs + bsz)) & MASK(bsz);
} else if (bofs == 0 && bsz == 16) {
return load_half(pkt, off);
} else if (bofs + bsz <= 16) {
return load_half(pkt, off) >> (16 - (bofs + bsz)) & MASK(bsz);
} else if (bofs == 0 && bsz == 32) {
return load_word(pkt, off);
} else if (bofs + bsz <= 32) {
return load_word(pkt, off) >> (32 - (bofs + bsz)) & MASK(bsz);
} else if (bofs + bsz <= 64) {
return bpf_ntohll(load_dword(pkt, off)) >> (64 - (bofs + bsz)) & MASK(bsz);
} else {
assert(0);
}
return 0;
}
/* Insert the low bsz bits of val into the packet at byte offset off, bit
 * offset bofs, preserving the surrounding bits: read-modify-write in the
 * narrowest unit that covers the field, converting back to network order
 * before the store. Fields wider than 64 bits hit assert(0). */
//static inline __attribute__((always_inline))
SEC("helpers")
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
// The load_xxx function does a bswap before returning the short/word/dword,
// so the value in register will always be host endian. However, the bytes
// written back need to be in network order.
if (bofs == 0 && bsz == 8) {
bpf_skb_store_bytes(pkt, off, &val, 1, 0);
} else if (bofs + bsz <= 8) {
u8 v = load_byte(pkt, off);
v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
bpf_skb_store_bytes(pkt, off, &v, 1, 0);
} else if (bofs == 0 && bsz == 16) {
u16 v = bpf_htons(val);
bpf_skb_store_bytes(pkt, off, &v, 2, 0);
} else if (bofs + bsz <= 16) {
u16 v = load_half(pkt, off);
v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
v = bpf_htons(v);
bpf_skb_store_bytes(pkt, off, &v, 2, 0);
} else if (bofs == 0 && bsz == 32) {
u32 v = bpf_htonl(val);
bpf_skb_store_bytes(pkt, off, &v, 4, 0);
} else if (bofs + bsz <= 32) {
u32 v = load_word(pkt, off);
v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
v = bpf_htonl(v);
bpf_skb_store_bytes(pkt, off, &v, 4, 0);
} else if (bofs == 0 && bsz == 64) {
u64 v = bpf_htonll(val);
bpf_skb_store_bytes(pkt, off, &v, 8, 0);
} else if (bofs + bsz <= 64) {
u64 v = load_dword(pkt, off);
v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
v = bpf_htonll(v);
bpf_skb_store_bytes(pkt, off, &v, 8, 0);
} else if (bofs + bsz <= 128) {
/* 128-bit insertion is not implemented yet */
assert(0);
//bpf_store_16bytes(pkt, off, bpf_hton128(~(MASK128(bsz) << (128 - (bofs + bsz)))),
// bpf_hton128((val & MASK128(bsz)) << (128 - (bofs + bsz))));
} else {
assert(0);
}
}
/* Thin wrappers over the bpf_map_* helpers taking the map as an integer
 * (pseudo fd) rather than a pointer; the frontend rewriter emits calls to
 * these names. */
SEC("helpers")
void * bpf_map_lookup_elem_(uintptr_t map, void *key) {
return bpf_map_lookup_elem((void *)map, key);
}
SEC("helpers")
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
return bpf_map_update_elem((void *)map, key, value, flags);
}
SEC("helpers")
int bpf_map_delete_elem_(uintptr_t map, void *key) {
return bpf_map_delete_elem((void *)map, key);
}
/* Pass-through wrapper so rewritten programs can call by a stable name. */
SEC("helpers")
int bpf_skb_store_bytes_(void *ctx, u64 off, void *from, u64 len, u64 flags) {
return bpf_skb_store_bytes(ctx, off, from, len, flags);
}
/* L3 checksum replace: the low nibble of flags encodes the operand width in
 * bytes (2/4/8); operands are converted to network order accordingly before
 * delegating to the kernel helper. */
SEC("helpers")
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
switch (flags & 0xf) {
case 2:
return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
case 4:
return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
case 8:
return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
default:
{}
}
/* unrecognized width: pass the operands through unconverted */
return bpf_l3_csum_replace(ctx, off, from, to, flags);
}
/* L4 checksum replace: same width-in-flags convention as the L3 variant. */
SEC("helpers")
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
switch (flags & 0xf) {
case 2:
return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
case 4:
return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
case 8:
return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
default:
{}
}
/* unrecognized width: pass the operands through unconverted */
return bpf_l4_csum_replace(ctx, off, from, to, flags);
}
#undef assert
......@@ -35,6 +35,12 @@ char * bpf_program_license(void *program) {
return prog->license();
}
// C-API shim: return the kernel version the program was built against (used
// for bpf_attr.kern_version at load time); 0 if program is null.
unsigned bpf_program_kern_version(void *program) {
auto prog = static_cast<ebpf::BPFProgram *>(program);
if (!prog) return 0;
return prog->kern_version();
}
int bpf_program_table_fd(void *program, const char *table_name) {
auto prog = static_cast<ebpf::BPFProgram *>(program);
if (!prog) return -1;
......
......@@ -11,6 +11,7 @@ void bpf_program_destroy(void *program);
void * bpf_program_start(void *program, const char *name);
size_t bpf_program_size(void *program, const char *name);
char * bpf_program_license(void *program);
unsigned bpf_program_kern_version(void *program);
int bpf_program_table_fd(void *program, const char *table_name);
#ifdef __cplusplus
......
#ifndef __BPF_HELPERS_H
#define __BPF_HELPERS_H
#include <linux/bpf.h>
#include <linux/version.h>
/* helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
* are interpreted by elf_bpf loader
*/
#define SEC(NAME) __attribute__((section(NAME), used))
// Changes to the macro require changes in BFrontendAction classes
/* BPF_TABLE declares a map object placed in a "maps/<type>" ELF section:
 * the key/leaf members carry the key and value types, get/put/delete are
 * method-style stubs that the clang frontend rewrites into bpf helper calls,
 * and sizeof(data)/sizeof(leaf) yields the table capacity. */
#define BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries) \
struct _name##_table_t { \
_key_type key; \
_leaf_type leaf; \
_leaf_type * (*get) (_key_type *); \
int (*put) (_key_type *, _leaf_type *); \
int (*delete) (_key_type *); \
_leaf_type data[_max_entries]; \
}; \
__attribute__((section("maps/" _table_type))) \
struct _name##_table_t _name
// export this function to llvm by putting it into a specially named section
//#define BPF_EXPORT(_ret, _name, ...) SEC("." #_name) _ret _name(__VA_ARGS__)
#define BPF_EXPORT(_name) __attribute__((section("." #_name)))
/* License and kernel-version values are emitted into their own sections for
 * the ELF loader to pick up. */
char _license[4] SEC("license") = "GPL";
unsigned _version SEC("version") = LINUX_VERSION_CODE;
/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_lookup_elem;
......@@ -53,4 +77,169 @@ static int (*bpf_l4_csum_replace)(void *ctx, unsigned long long off, unsigned lo
unsigned long long to, unsigned long long flags) =
(void *) BPF_FUNC_l4_csum_replace;
/* Byte-order helpers. The bpf_ntoh* family converts network -> host order;
 * the bpf_hton* family is defined in terms of it since byte swapping is its
 * own inverse. */
static inline u16 bpf_ntohs(u16 val) {
/* will be recognized by gcc into rotate insn and eventually rolw 8 */
return (val << 8) | (val >> 8);
}
static inline u32 bpf_ntohl(u32 val) {
/* gcc will use bswapsi2 insn */
return __builtin_bswap32(val);
}
static inline u64 bpf_ntohll(u64 val) {
/* gcc will use bswapdi2 insn */
return __builtin_bswap64(val);
}
/* 128-bit swap: swap each 64-bit half, then exchange the halves. */
static inline unsigned __int128 bpf_ntoh128(unsigned __int128 val) {
return (((unsigned __int128)bpf_ntohll(val) << 64) | (u64)bpf_ntohll(val >> 64));
}
static inline u16 bpf_htons(u16 val) {
return bpf_ntohs(val);
}
static inline u32 bpf_htonl(u32 val) {
return bpf_ntohl(val);
}
static inline u64 bpf_htonll(u64 val) {
return bpf_ntohll(val);
}
static inline unsigned __int128 bpf_hton128(unsigned __int128 val) {
return bpf_ntoh128(val);
}
/* Load a 64-bit quantity from the packet as two 32-bit loads; load_word
 * already converts each half to host order, so the word at `off` forms the
 * high 32 bits. */
static inline u64 load_dword(void *skb, u64 off) {
  /* bug fix: the high word must be shifted by 32 bits, not 4 */
  return ((u64)load_word(skb, off) << 32) | load_word(skb, off + 4);
}
/* Store intrinsics provided by the BPF LLVM backend via named asm symbols. */
void bpf_store_byte(void *skb, u64 off, u64 val) asm("llvm.bpf.store.byte");
void bpf_store_half(void *skb, u64 off, u64 val) asm("llvm.bpf.store.half");
void bpf_store_word(void *skb, u64 off, u64 val) asm("llvm.bpf.store.word");
/* llvm.bpf.pseudo embeds a map fd reference into the instruction stream;
 * the frontend rewriter generates calls to bpf_pseudo_fd(1, fd). */
u64 bpf_pseudo_fd(u64, u64) asm("llvm.bpf.pseudo");
/* Store a 64-bit value as two 32-bit stores (low half first). */
static inline void bpf_store_dword(void *skb, u64 off, u64 val) {
bpf_store_word(skb, off, (u32)val);
bpf_store_word(skb, off + 4, val >> 32);
}
/* MASK(n): the lowest n bits set (n <= 64); MASK128 is the 128-bit variant. */
#define MASK(_n) ((_n) < 64 ? (1ull << (_n)) - 1 : ((u64)-1LL))
#define MASK128(_n) ((_n) < 128 ? ((unsigned __int128)1 << (_n)) - 1 : ((unsigned __int128)-1))
struct bpf_context;
/* Extract a bsz-bit field located bofs bits past the start of the byte at
 * offset off, right-aligned in the return value. Selects the narrowest
 * load (byte/half/word/dword) that covers the field; unsupported widths
 * fall through and return 0. */
//static inline __attribute__((always_inline))
SEC("helpers")
u64 bpf_dext_pkt(void *pkt, u64 off, u64 bofs, u64 bsz) {
if (bofs == 0 && bsz == 8) {
return load_byte(pkt, off);
} else if (bofs + bsz <= 8) {
return load_byte(pkt, off) >> (8 - (bofs + bsz)) & MASK(bsz);
} else if (bofs == 0 && bsz == 16) {
return load_half(pkt, off);
} else if (bofs + bsz <= 16) {
return load_half(pkt, off) >> (16 - (bofs + bsz)) & MASK(bsz);
} else if (bofs == 0 && bsz == 32) {
return load_word(pkt, off);
} else if (bofs + bsz <= 32) {
return load_word(pkt, off) >> (32 - (bofs + bsz)) & MASK(bsz);
} else if (bofs + bsz <= 64) {
return bpf_ntohll(load_dword(pkt, off)) >> (64 - (bofs + bsz)) & MASK(bsz);
}
return 0;
}
/* Insert the low bsz bits of val into the packet at byte offset off, bit
 * offset bofs, preserving the surrounding bits: read-modify-write in the
 * narrowest unit that covers the field, converting back to network order
 * before the store. Fields wider than 64 bits are silently ignored here
 * (unlike the .c variant, which asserts). */
//static inline __attribute__((always_inline))
SEC("helpers")
void bpf_dins_pkt(void *pkt, u64 off, u64 bofs, u64 bsz, u64 val) {
// The load_xxx function does a bswap before returning the short/word/dword,
// so the value in register will always be host endian. However, the bytes
// written back need to be in network order.
if (bofs == 0 && bsz == 8) {
bpf_skb_store_bytes(pkt, off, &val, 1, 0);
} else if (bofs + bsz <= 8) {
u8 v = load_byte(pkt, off);
v &= ~(MASK(bsz) << (8 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (8 - (bofs + bsz)));
bpf_skb_store_bytes(pkt, off, &v, 1, 0);
} else if (bofs == 0 && bsz == 16) {
u16 v = bpf_htons(val);
bpf_skb_store_bytes(pkt, off, &v, 2, 0);
} else if (bofs + bsz <= 16) {
u16 v = load_half(pkt, off);
v &= ~(MASK(bsz) << (16 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (16 - (bofs + bsz)));
v = bpf_htons(v);
bpf_skb_store_bytes(pkt, off, &v, 2, 0);
} else if (bofs == 0 && bsz == 32) {
u32 v = bpf_htonl(val);
bpf_skb_store_bytes(pkt, off, &v, 4, 0);
} else if (bofs + bsz <= 32) {
u32 v = load_word(pkt, off);
v &= ~(MASK(bsz) << (32 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (32 - (bofs + bsz)));
v = bpf_htonl(v);
bpf_skb_store_bytes(pkt, off, &v, 4, 0);
} else if (bofs == 0 && bsz == 64) {
u64 v = bpf_htonll(val);
bpf_skb_store_bytes(pkt, off, &v, 8, 0);
} else if (bofs + bsz <= 64) {
u64 v = load_dword(pkt, off);
v &= ~(MASK(bsz) << (64 - (bofs + bsz)));
v |= ((val & MASK(bsz)) << (64 - (bofs + bsz)));
v = bpf_htonll(v);
bpf_skb_store_bytes(pkt, off, &v, 8, 0);
}
}
/* Thin wrappers over the bpf_map_* helpers taking the map as an integer
 * (pseudo fd) rather than a pointer; the frontend rewriter emits calls to
 * these names. */
SEC("helpers")
void * bpf_map_lookup_elem_(uintptr_t map, void *key) {
return bpf_map_lookup_elem((void *)map, key);
}
SEC("helpers")
int bpf_map_update_elem_(uintptr_t map, void *key, void *value, u64 flags) {
return bpf_map_update_elem((void *)map, key, value, flags);
}
SEC("helpers")
int bpf_map_delete_elem_(uintptr_t map, void *key) {
return bpf_map_delete_elem((void *)map, key);
}
/* Pass-through wrapper so rewritten programs can call by a stable name. */
SEC("helpers")
int bpf_skb_store_bytes_(void *ctx, u64 off, void *from, u64 len, u64 flags) {
return bpf_skb_store_bytes(ctx, off, from, len, flags);
}
/* L3 checksum replace: the low nibble of flags encodes the operand width in
 * bytes (2/4/8); operands are converted to network order accordingly before
 * delegating to the kernel helper. */
SEC("helpers")
int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
switch (flags & 0xf) {
case 2:
return bpf_l3_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
case 4:
return bpf_l3_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
case 8:
return bpf_l3_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
default:
{}
}
/* unrecognized width: pass the operands through unconverted */
return bpf_l3_csum_replace(ctx, off, from, to, flags);
}
/* L4 checksum replace: same width-in-flags convention as the L3 variant. */
SEC("helpers")
int bpf_l4_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
switch (flags & 0xf) {
case 2:
return bpf_l4_csum_replace(ctx, off, bpf_htons(from), bpf_htons(to), flags);
case 4:
return bpf_l4_csum_replace(ctx, off, bpf_htonl(from), bpf_htonl(to), flags);
case 8:
return bpf_l4_csum_replace(ctx, off, bpf_htonll(from), bpf_htonll(to), flags);
default:
{}
}
/* unrecognized width: pass the operands through unconverted */
return bpf_l4_csum_replace(ctx, off, from, to, flags);
}
#endif
This diff is collapsed.
......@@ -31,6 +31,7 @@ class Module;
}
namespace ebpf {
class BPFTable;
namespace cc {
class CodegenLLVM;
......@@ -43,7 +44,8 @@ class BPFProgram {
int parse();
int finalize();
void dump_ir();
int load_helper(std::unique_ptr<llvm::Module> *mod);
int load_file_module(std::unique_ptr<llvm::Module> *mod, const std::string &file);
int load_includes(const std::string &tmpfile);
int kbuild_flags(const char *uname_release, std::vector<std::string> *cflags);
public:
BPFProgram(unsigned flags);
......@@ -53,6 +55,7 @@ class BPFProgram {
size_t size(const std::string &name) const;
int table_fd(const std::string &name) const;
char * license() const;
unsigned kern_version() const;
private:
unsigned flags_; // 0x1 for printing
std::string filename_;
......@@ -64,6 +67,7 @@ class BPFProgram {
std::unique_ptr<ebpf::cc::Parser> proto_parser_;
std::unique_ptr<ebpf::cc::CodegenLLVM> codegen_;
std::map<std::string, std::tuple<uint8_t *, uintptr_t>> sections_;
std::unique_ptr<std::map<std::string, BPFTable>> tables_;
};
} // namespace ebpf
......@@ -1151,7 +1151,10 @@ StatusTuple CodegenLLVM::visit_func_decl_stmt_node(FuncDeclStmtNode *n) {
VariableDeclStmtNode *formal = it->get();
if (formal->is_struct()) {
StructType *stype;
TRY2(lookup_struct_type(formal, &stype));
//TRY2(lookup_struct_type(formal, &stype));
auto var = (StructVariableDeclStmtNode *)formal;
stype = mod_->getTypeByName("struct." + var->struct_id_->name_);
if (!stype) return mkstatus_(n, "could not find type %s", var->struct_id_->c_str());
formals.push_back(PointerType::getUnqual(stype));
} else {
formals.push_back(B.getIntNTy(formal->bit_width_));
......
#include <fcntl.h>
#include <ftw.h>
#include "kbuild_helper.h"
namespace ebpf {
using std::string;
using std::vector;
// Select the flags-cache directory: per-user under $HOME when available,
// otherwise a system-wide fallback.
KBuildHelper::KBuildHelper() {
  const char *home = ::getenv("HOME");
  cache_dir_ = home ? string(home) + "/.cache/bcc" : string("/var/run/bcc");
}
// Makefile helper for kbuild_flags
// Runs a throwaway kbuild module build in tmpdir whose only effect is to echo
// the kbuild-computed cflags for this kernel into cachefile. Returns an fd
// open for reading on the cache file, or -1 on failure.
int KBuildHelper::learn_flags(const string &tmpdir, const char *uname_release, const char *cachefile) {
{
// Create a kbuild file to generate the flags
string makefile = tmpdir + "/Makefile";
FILEPtr mf(::fopen(makefile.c_str(), "w"));
if (!mf)
return -1;
// The %.o rule never compiles anything: it dumps the kbuild-provided
// include paths and flags for dummy.c into the cache file.
fprintf(&*mf, "obj-y := dummy.o\n");
fprintf(&*mf, "CACHEDIR=$(dir %s)\n", cachefile);
fprintf(&*mf, "$(CACHEDIR):\n");
fprintf(&*mf, "\t@mkdir -p $(CACHEDIR)\n");
fprintf(&*mf, "$(obj)/%%.o: $(src)/%%.c $(CACHEDIR)\n");
fprintf(&*mf, "\t@echo -n \"$(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) "
"-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \" > %s\n", cachefile);
}
{
// Empty translation unit that triggers the %.o rule above.
string cfile = tmpdir + "/dummy.c";
FILEPtr cf(::fopen(cfile.c_str(), "w"));
if (!cf)
return -1;
}
// Invoke the kernel build system for the running (or requested) release.
string cmd = "make -s";
cmd += " -C " KERNEL_MODULES_DIR "/" + string(uname_release) + "/build";
cmd += " M=" + tmpdir + " dummy.o";
int rc = ::system(cmd.c_str());
if (rc < 0) {
::perror("system");
return -1;
}
// NOTE(review): a non-zero make exit status is not treated as an error here;
// if the cache file was not written, the open() below fails instead.
return ::open(cachefile, O_RDONLY);
}
// read the flags from cache or learn
// Populates *cflags with the space-separated kbuild flags for uname_release,
// building the cache via learn_flags() on a cache miss. Returns 0 on
// success, -1 on failure.
int KBuildHelper::get_flags(const char *uname_release, vector<string> *cflags) {
  char cachefile[256];
  snprintf(cachefile, sizeof(cachefile), "%s/%s.flags", cache_dir_.c_str(), uname_release);
  int cachefd = ::open(cachefile, O_RDONLY);
  if (cachefd < 0) {
    TmpDir tmpdir;
    if (!tmpdir.ok())
      return -1;
    cachefd = learn_flags(tmpdir.str(), uname_release, cachefile);
    if (cachefd < 0)
      return -1;
  }
  FILEPtr f(::fdopen(cachefd, "r"));
  // fdopen can fail (e.g. ENOMEM); without this check the getdelim loop
  // below would dereference a null FILE*. Close the raw fd ourselves since
  // FILEPtr owns nothing in that case.
  if (!f) {
    ::close(cachefd);
    return -1;
  }
  size_t len = 0;
  char *line = NULL;
  ssize_t nread;
  // The cache is a single space-separated line; split on ' ' and drop empty
  // tokens and trailing separators.
  while ((nread = getdelim(&line, &len, ' ', &*f)) >= 0) {
    if (nread == 0 || (nread == 1 && line[0] == ' ')) continue;
    if (line[nread - 1] == ' ')
      --nread;
    cflags->push_back(string(line, nread));
  }
  free(line);  // getdelim allocates/reallocates the line buffer
  return 0;
}
} // namespace ebpf
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <unistd.h>
#define KERNEL_MODULES_DIR "/lib/modules"
namespace ebpf {
// Deleter so that unique_ptr can own a C stdio FILE*.
struct FileDeleter {
void operator() (FILE *fp) {
fclose(fp);
}
};
// RAII handle for FILE*: closes the stream when it goes out of scope.
typedef std::unique_ptr<FILE, FileDeleter> FILEPtr;
// Helper with pushd/popd semantics
// Saves the current working directory, chdir's to dst, and restores the
// saved directory on destruction. Callers must check ok() before assuming
// the directory change took effect.
class DirStack {
public:
explicit DirStack(const char *dst) : ok_(false) {
// NOTE(review): cwd_ is a fixed 256-byte buffer; getcwd fails (ERANGE)
// for deeper working directories and the stack is then unusable.
if (getcwd(cwd_, sizeof(cwd_)) == NULL) {
::perror("getcwd");
return;
}
if (::chdir(dst)) {
fprintf(stderr, "chdir(%s): %s\n", dst, strerror(errno));
return;
}
ok_ = true;
}
~DirStack() {
// Only restore if the constructor fully succeeded.
if (!ok_) return;
if (::chdir(cwd_)) {
fprintf(stderr, "chdir(%s): %s\n", cwd_, strerror(errno));
}
}
bool ok() const { return ok_; }
const char * cwd() const { return cwd_; }
private:
bool ok_;
char cwd_[256];
};
// Scoped class to manage the creation/deletion of tmpdirs
// Creates a unique directory under the given prefix with mkdtemp(); the
// destructor recursively removes it. Check ok() before using str().
class TmpDir {
 public:
  explicit TmpDir(const std::string &prefix = "/tmp/bcc-")
      : ok_(false), prefix_(prefix) {
    prefix_ += "XXXXXX";
    // mkdtemp rewrites the trailing XXXXXX in place. Use &prefix_[0] for a
    // legitimately mutable pointer instead of casting away the constness of
    // data() (UB to write through before C++17).
    if (::mkdtemp(&prefix_[0]) == NULL)
      ::perror("mkdtemp");
    else
      ok_ = true;
  }
  ~TmpDir() {
    // If creation failed there is nothing to clean up; previously the ftw()
    // walk was attempted on the unexpanded template path.
    if (!ok_) return;
    // Remove each visited path; the final remove() deletes the (by then
    // empty) top-level directory.
    auto fn = [] (const char *path, const struct stat *, int) -> int {
      return ::remove(path);
    };
    if (::ftw(prefix_.c_str(), fn, 20) < 0)
      ::perror("ftw");
    else
      ::remove(prefix_.c_str());
  }
  bool ok() const { return ok_; }
  const std::string & str() const { return prefix_; }
 private:
  bool ok_;
  std::string prefix_;
};
// Compute the kbuild flags for the currently running kernel
// Do this by:
// 1. Create temp Makefile with stub dummy.c
// 2. Run module build on that makefile, saving the computed flags to a file
// 3. Cache the file for fast flag lookup in subsequent runs
// Note: Depending on environment, different cache locations may be desired. In
// case we eventually support non-root user programs, cache in $HOME.
class KBuildHelper {
 public:
  KBuildHelper();
  // Fetch the cflags needed to build against the given kernel release,
  // reading the per-release cache or (re)learning via a stub module build.
  // Returns 0 on success, -1 on failure.
  int get_flags(const char *uname_release, std::vector<std::string> *cflags);
 private:
  // Run the stub kbuild in tmpdir and persist the resulting flags to
  // cachefile; returns an fd open on the cache file, or negative on error.
  // (The two redundant `private:` sections of the original are merged.)
  int learn_flags(const std::string &tmpdir, const char *uname_release, const char *cachefile);
  std::string cache_dir_;
};
} // namespace ebpf
......@@ -103,7 +103,7 @@ std::string tmp_str_cc;
"u32" return save(Tok::TU32);
"u64" return save(Tok::TU64);
[a-zA-Z][a-zA-Z0-9_]* return save(Tok::TIDENTIFIER);
[a-zA-Z_][a-zA-Z0-9_]* return save(Tok::TIDENTIFIER);
[0-9]+ return save(Tok::TINTEGER);
0x[0-9a-fA-F]+ return save(Tok::THEXINTEGER);
......
......@@ -85,7 +85,7 @@ char bpf_log_buf[LOG_BUF_SIZE];
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int prog_len,
const char *license)
const char *license, unsigned kern_version)
{
union bpf_attr attr = {
.prog_type = prog_type,
......@@ -97,7 +97,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
.log_level = 1,
};
attr.kern_version = LINUX_VERSION_CODE;
attr.kern_version = kern_version;
bpf_log_buf[0] = 0;
int ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
......
......@@ -19,7 +19,7 @@ int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
const char *license);
const char *license, unsigned kern_version);
int bpf_attach_socket(int sockfd, int progfd);
int bpf_attach_filter(int progfd, const char *prog_name, uint32_t ifindex,
uint8_t prio, uint32_t classid);
......
add_test(NAME py_test1 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_test1 ${CMAKE_CURRENT_SOURCE_DIR}/test1.py namespace)
add_test(NAME py_test1_b WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_test1_b namespace ${CMAKE_CURRENT_SOURCE_DIR}/test1.py test1.b proto.b)
add_test(NAME py_test1_c WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_test1_c namespace ${CMAKE_CURRENT_SOURCE_DIR}/test1.py test1.c)
add_test(NAME py_test2 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_test2 ${CMAKE_CURRENT_SOURCE_DIR}/test2.py namespace)
COMMAND ${TEST_WRAPPER} py_test2 namespace ${CMAKE_CURRENT_SOURCE_DIR}/test2.py test2.b proto.b)
add_test(NAME py_trace1 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_trace1 ${CMAKE_CURRENT_SOURCE_DIR}/trace1.py sudo)
add_test(NAME py_trace2 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_trace2 ${CMAKE_CURRENT_SOURCE_DIR}/trace2.py sudo)
COMMAND ${TEST_WRAPPER} py_trace1 sudo ${CMAKE_CURRENT_SOURCE_DIR}/trace1.py trace1.b kprobe.b)
add_test(NAME py_trace2_b WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_trace2_b sudo ${CMAKE_CURRENT_SOURCE_DIR}/trace2.py trace2.b kprobe.b)
add_test(NAME py_trace2_c WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_trace2_c sudo ${CMAKE_CURRENT_SOURCE_DIR}/trace2.py trace2.c)
add_test(NAME py_trace3_c WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_trace3_c sudo ${CMAKE_CURRENT_SOURCE_DIR}/trace3.py trace3.c)
#packed "true"
struct skbuff {
u32 len:32;
u32 pkt_type:32;
u32 mark:32;
u32 queue_mapping:32;
u32 protocol:32;
u32 vlan_present:32;
u32 vlan_tci:32;
u32 vlan_proto:32;
u32 priority:32;
};
struct ethernet {
u64 dst:48;
u64 src:48;
......
#packed "false"
struct IPKey {
u32 dip:32;
u32 sip:32;
......@@ -10,7 +8,11 @@ struct IPLeaf {
};
Table<IPKey, IPLeaf, FIXED_MATCH, AUTO> stats(1024);
u32 main(struct proto::skbuff *skb) {
struct skbuff {
u32 type:32;
};
u32 main(struct skbuff *skb) {
u32 ret:32 = 0;
goto proto::ethernet;
......
#include "../../src/cc/bpf_helpers.h"
// Flow key: IPv4 destination/source pair. The parser normalizes the pair so
// both directions of a flow share one key (see the ip: block in foo below).
struct IPKey {
u32 dip;
u32 sip;
};
// Per-flow packet counters, one per direction.
struct IPLeaf {
u64 rx_pkts;
u64 tx_pkts;
};
// 256-entry hash table "stats": IPKey -> IPLeaf (bcc BPF_TABLE macro).
BPF_TABLE("hash", struct IPKey, struct IPLeaf, stats, 256);
BPF_EXPORT(main)
// Label-per-protocol packet parser: ethernet -> optional 802.1Q -> IPv4 ->
// tcp/udp, counting per-address-pair packets in both directions in "stats".
// bpf_dext_pkt(skb, byte_off, bit_off, bit_len) appears to extract a field
// from the packet at the given offsets - TODO confirm against bpf_helpers.h.
int foo(struct __sk_buff *skb) {
  // cur = offset of the header being parsed; next = offset of the next one.
  size_t next = 0, cur = 0;
  ethernet:
  {
    cur = next; next += 14;  // 14-byte Ethernet header
    switch (bpf_dext_pkt(skb, cur + 12, 0, 16)) {  // ethertype
    case 0x800: goto ip;
    case 0x8100: goto dot1q;  // VLAN tagged
    default: goto EOP;
    }
  }
  dot1q:
  {
    cur = next; next += 4;  // 4-byte 802.1Q tag
    switch (bpf_dext_pkt(skb, cur + 2, 0, 16)) {  // encapsulated ethertype
    case 0x0800: goto ip;
    default: goto EOP;
    }
  }
  ip:
  {
    cur = next; next += 20;  // fixed 20-byte IPv4 header (options ignored)
    int rx = 0;
    int tx = 0;
    struct IPKey key = {0};
    // Normalize the key so both directions of a flow hash to the same entry:
    // the numerically larger address always lands in key.dip. The rx/tx flag
    // records which direction this particular packet traveled.
    if (bpf_dext_pkt(skb, cur + 16, 0, 32) > bpf_dext_pkt(skb, cur + 12, 0, 32)) {
      key.sip = bpf_dext_pkt(skb, cur + 12, 0, 32);
      key.dip = bpf_dext_pkt(skb, cur + 16, 0, 32);
      rx = 1;
    } else {
      key.dip = bpf_dext_pkt(skb, cur + 12, 0, 32);
      key.sip = bpf_dext_pkt(skb, cur + 16, 0, 32);
      tx = 1;
    }
    // try to get here:
    //stats[key].rx_pkts += rx;
    //stats[key].tx_pkts += tx;
    // or here:
    //struct IPLeaf *leaf = stats[key];
    //if (leaf) {
    //  __sync_fetch_and_add(&leaf->rx_pkts, rx);
    //  __sync_fetch_and_add(&leaf->tx_pkts, tx);
    //}
    // get-or-insert: create a zeroed entry on first sight of a flow, then
    // re-fetch. NOTE(review): put+get is not atomic; concurrent first packets
    // of a flow may race, but the counter adds themselves are atomic below.
    struct IPLeaf *leaf;
    leaf = stats.get(&key);
    if (!leaf) {
      struct IPLeaf zleaf = {0};
      stats.put(&key, &zleaf);
      leaf = stats.get(&key);
    }
    if (leaf) {
      __sync_fetch_and_add(&leaf->rx_pkts, rx);
      __sync_fetch_and_add(&leaf->tx_pkts, tx);
    }
    switch (bpf_dext_pkt(skb, cur + 9, 0, 8)) {  // IP protocol field
    case 6: goto tcp;
    case 17: goto udp;
    //case 47: goto gre;
    default: goto EOP;
    }
  }
  udp:
  {
    cur = next; next += 8;  // 8-byte UDP header
    switch (bpf_dext_pkt(skb, cur + 2, 0, 16)) {  // destination port
    //case 8472: goto vxlan;
    //case 4789: goto vxlan;
    default: goto EOP;
    }
  }
  tcp:
  {
    cur = next; next += 20;  // minimal TCP header; nothing extracted yet
    goto EOP;
  }
  EOP:
  return 0;
}
......@@ -7,8 +7,14 @@ from ctypes import c_uint, c_ulong, Structure
from netaddr import IPAddress
from bpf import BPF
from subprocess import check_call
import sys
from unittest import main, TestCase
arg1 = sys.argv.pop(1)
arg2 = ""
if len(sys.argv) > 1:
arg2 = sys.argv.pop(1)
class Key(Structure):
_fields_ = [("dip", c_uint),
("sip", c_uint)]
......@@ -18,7 +24,7 @@ class Leaf(Structure):
class TestBPFSocket(TestCase):
def setUp(self):
self.prog = BPF("main", "test1.b", "proto.b", debug=0)
self.prog = BPF("main", arg1, arg2, debug=0)
self.prog.attach("eth0")
self.stats = self.prog.table("stats", Key, Leaf)
......
......@@ -13,7 +13,11 @@ struct IPLeaf {
};
Table<IPKey, IPLeaf, FIXED_MATCH, NONE> xlate(1024);
u32 main (struct proto::skbuff *skb) {
struct skbuff {
u32 type:32;
};
u32 main (struct skbuff *skb) {
u32 ret:32 = 1;
u32 orig_dip:32 = 0;
......
......@@ -4,9 +4,15 @@ from ctypes import c_uint, c_ulonglong, Structure
from netaddr import IPAddress
from bpf import BPF
from socket import socket, AF_INET, SOCK_DGRAM
import sys
from time import sleep
from unittest import main, TestCase
arg1 = sys.argv.pop(1)
arg2 = ""
if len(sys.argv) > 1:
arg2 = sys.argv.pop(1)
class Key(Structure):
_fields_ = [("dip", c_uint),
("sip", c_uint)]
......@@ -17,7 +23,7 @@ class Leaf(Structure):
class TestBPFSocket(TestCase):
def setUp(self):
self.prog = BPF("main", "test2.b", "proto.b",
self.prog = BPF("main", arg1, arg2,
BPF.BPF_PROG_TYPE_SCHED_CLS, debug=0)
with open("/sys/class/net/eth0/ifindex") as f:
ifindex = int(f.read())
......
......@@ -4,8 +4,14 @@ from ctypes import c_uint, c_ulong, Structure
from bpf import BPF
import os
from time import sleep
import sys
from unittest import main, TestCase
arg1 = sys.argv.pop(1)
arg2 = ""
if len(sys.argv) > 1:
arg2 = sys.argv.pop(1)
class Key(Structure):
_fields_ = [("fd", c_ulong)]
class Leaf(Structure):
......@@ -14,7 +20,7 @@ class Leaf(Structure):
class TestKprobe(TestCase):
def setUp(self):
self.prog = BPF("trace1", "trace1.b", "kprobe.b",
self.prog = BPF("trace1", arg1, arg2,
prog_type=BPF.BPF_PROG_TYPE_KPROBE, debug=0)
self.prog.load("sys_wr")
self.prog.load("sys_rd")
......
#include <linux/ptrace.h>
#include "../../src/cc/bpf_helpers.h"
// Key: raw pointer value captured from a probed register (see count_sched).
struct Ptr { u64 ptr; };
// Value: a single event counter.
struct Counters { u64 stat1; };
// Hash table "stats": Ptr -> Counters, 1024 entries (bcc BPF_TABLE macro).
BPF_TABLE("hash", struct Ptr, struct Counters, stats, 1024);
BPF_EXPORT(count_sched)
// Kprobe handler: increments stats[key].stat1 keyed by the bx register of the
// probed context (presumably a pointer argument of the probed function -
// TODO confirm which probe the test attaches this to).
int count_sched(struct pt_regs *ctx) {
  struct Ptr key = {.ptr=ctx->bx};
#if 1
  // stats.data[(u64)&key] is bcc clang-rewriter sugar (not plain C): it
  // appears to resolve/create the table entry for key and yield an lvalue.
  stats.data[(u64)&key].stat1++;
#else
  // Alternative explicit form kept for reference.
  struct Counters zleaf = {0};
  stats.upsert(&key, &zleaf)->stat1++;
#endif
  return 0;
}
......@@ -3,8 +3,14 @@
from ctypes import c_uint, c_ulong, Structure
from bpf import BPF
from time import sleep
import sys
from unittest import main, TestCase
arg1 = sys.argv.pop(1)
arg2 = ""
if len(sys.argv) > 1:
arg2 = sys.argv.pop(1)
class Ptr(Structure):
_fields_ = [("ptr", c_ulong)]
class Counters(Structure):
......@@ -12,7 +18,7 @@ class Counters(Structure):
class TestTracingEvent(TestCase):
def setUp(self):
self.prog = BPF("trace2", "trace2.b", "kprobe.b",
self.prog = BPF("trace2", arg1, arg2,
prog_type=BPF.BPF_PROG_TYPE_KPROBE, debug=0)
self.prog.load("count_sched")
self.stats = self.prog.table("stats", Ptr, Counters)
......
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include "../../src/cc/bpf_helpers.h"
// Key: pointer identifying an in-flight block request.
struct Request { u64 rq; };
// Value: its start timestamp in nanoseconds (from bpf_ktime_get_ns).
struct Time { u64 start; };
// Outstanding requests: request pointer -> start time.
BPF_TABLE("hash", struct Request, struct Time, requests, 1024);
#define SLOTS 100
// Log-linear latency histogram with SLOTS buckets (see bucket math below).
BPF_TABLE("array", u32, u64, latency, SLOTS);
// Branch-free floor(log2(v)) for 32-bit v (returns 0 for v == 0).
// NOTE(review): deliberately straight-line bit twiddling - presumably because
// the BPF verifier rejects loops, so do not rewrite as a while-loop.
static u32 log2(u32 v) {
  u32 r, shift;
  // Binary-search the highest set bit 16/8/4/2 bits at a time, accumulating
  // the shift distances into r; the last line adds the final 1-bit step.
  r = (v > 0xFFFF) << 4; v >>= r;
  shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
  shift = (v > 0xF) << 2; v >>= shift; r |= shift;
  shift = (v > 0x3) << 1; v >>= shift; r |= shift;
  r |= (v >> 1);
  return r;
}
// floor(log2(v)) for 64-bit v, built from the 32-bit helper: use the high
// word when non-zero (offset by 32), otherwise the low word.
static u32 log2l(u64 v) {
  u32 hi = v >> 32;
  if (hi)
    return log2(hi) + 32;
  else
    return log2(v);
}
BPF_EXPORT(probe_blk_start_request)
// Kprobe entry handler: record the start timestamp for a block request.
// ctx->di presumably carries the first argument (the struct request pointer)
// per the x86-64 calling convention - TODO confirm.
int probe_blk_start_request(struct pt_regs *ctx) {
  struct Request rq = {.rq = ctx->di};
  struct Time tm = {.start = bpf_ktime_get_ns()};
  requests.put(&rq, &tm);
  return 0;
}
BPF_EXPORT(probe_blk_update_request)
// Kprobe handler: compute the request's service latency and accumulate it
// into the log-linear histogram, then retire the request entry.
int probe_blk_update_request(struct pt_regs *ctx) {
  struct Request rq = {.rq = ctx->di};
  struct Time *tm = requests.get(&rq);
  if (!tm) return 0;  // start was never seen; drop the sample
  u64 delta = bpf_ktime_get_ns() - tm->start;
  requests.delete(&rq);
  // Bucketize on a log-linear scale: roughly 3 sub-buckets per power of two
  // ((delta - base)/base in [0,1) is scaled by 64 then the whole index by
  // 3/64), clamped to the table size.
  u64 lg = log2l(delta);
  u64 base = 1ull << lg;
  u32 index = (lg * 64 + (delta - base) * 64 / base) * 3 / 64;
  if (index >= SLOTS)
    index = SLOTS - 1;
  // latency.data[(u64)&index] is bcc rewriter sugar for the array element;
  // the increment itself is an atomic add.
  __sync_fetch_and_add(&latency.data[(u64)&index], 1);
  return 0;
}
#!/usr/bin/env python
from ctypes import c_uint, c_ulong, Structure
from bpf import BPF
from time import sleep
import sys
from unittest import main, TestCase
# Consume the CLI arguments before unittest sees them: the first is the BPF
# program source, the optional second is a companion file.
arg1 = sys.argv.pop(1)
arg2 = sys.argv.pop(1) if len(sys.argv) > 1 else ""
class TestBlkRequest(TestCase):
    def setUp(self):
        # Compile the tracing program named on the command line and hook the
        # two block-layer functions with the corresponding kprobe handlers.
        self.prog = BPF("trace3", arg1, arg2,
                prog_type=BPF.BPF_PROG_TYPE_KPROBE, debug=0)
        for fn_name in ("probe_blk_start_request", "probe_blk_update_request"):
            self.prog.load(fn_name)
        self.latency = self.prog.table("latency", c_uint, c_ulong)
        self.prog.attach_kprobe("blk_start_request", "probe_blk_start_request", 0, -1)
        self.prog.attach_kprobe("blk_update_request", "probe_blk_update_request", 0, -1)
    def test_blk1(self):
        import subprocess
        import os
        # Generate some real block I/O (write + sync) so the probes fire.
        for _ in range(0, 2):
            with open("/srv/trace3.txt", "w") as f:
                f.write("a" * 4096 * 4096)
            subprocess.call(["sync"])
        os.unlink("/srv/trace3.txt")
        # Dump whatever latency buckets were populated.
        for key in self.latency.iter():
            leaf = self.latency.get(key)
            print("latency %u:" % key.value, "count %u" % leaf.value)
        sys.stdout.flush()
# Run the unittest driver when executed directly (via the test wrapper).
if __name__ == "__main__":
    main()
......@@ -3,8 +3,8 @@
#set -x
name=$1; shift
cmd=$1; shift
kind=$1; shift
cmd=$1; shift
PYTHONPATH=@CMAKE_SOURCE_DIR@/src
LD_LIBRARY_PATH=@CMAKE_BINARY_DIR@:@CMAKE_BINARY_DIR@/src/cc
......@@ -30,11 +30,11 @@ function ns_run() {
sudo ip netns exec $ns ip link set eth0 up
sudo ip addr add dev $ns.out 172.16.1.1/24
sudo ip link set $ns.out up
sudo bash -c "PYTHONPATH=$PYTHONPATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH ip netns exec $ns $cmd $@"
sudo bash -c "PYTHONPATH=$PYTHONPATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH ip netns exec $ns $cmd $1 $2"
return $?
}
function sudo_run() {
sudo bash -c "PYTHONPATH=$PYTHONPATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH $cmd $@"
sudo bash -c "PYTHONPATH=$PYTHONPATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH $cmd $1 $2"
return $?
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment