Commit 0bde502c authored by Suchakra Sharma

Merge branch 'master' into open_probes

parents 416613aa b1e3b670
@@ -36,6 +36,7 @@ struct _name##_table_t { \
int (*update) (_key_type *, _leaf_type *); \
int (*delete) (_key_type *); \
void (*call) (void *, int index); \
void (*increment) (_key_type); \
_leaf_type data[_max_entries]; \
}; \
__attribute__((section("maps/" _table_type))) \
@@ -55,6 +56,19 @@ struct _name##_table_t _name
#define BPF_HASH(...) \
BPF_HASHX(__VA_ARGS__, BPF_HASH3, BPF_HASH2, BPF_HASH1)(__VA_ARGS__)
#define BPF_HIST1(_name) \
BPF_TABLE("histogram", int, u64, _name, 64)
#define BPF_HIST2(_name, _key_type) \
BPF_TABLE("histogram", _key_type, u64, _name, 64)
#define BPF_HIST3(_name, _key_type, _size) \
BPF_TABLE("histogram", _key_type, u64, _name, _size)
#define BPF_HISTX(_1, _2, _3, NAME, ...) NAME
// Define a histogram, some arguments optional
// BPF_HISTOGRAM(name, key_type=int, size=64)
#define BPF_HISTOGRAM(...) \
BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)
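// Example usage, as exercised by test_histogram.py and the updated tools
// later in this commit:
//   BPF_HISTOGRAM(dist);               // defaults: int key, 64 slots
//   BPF_HISTOGRAM(hist1, Key, 1024);   // struct key, 1024 slots
//   dist.increment(bpf_log2l(delta));  // bump the log2 bucket for delta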
// packet parsing state machine helpers
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -67,9 +67,11 @@ bool BMapDeclVisitor::VisitRecordDecl(RecordDecl *D) {
for (auto F : D->getDefinition()->fields()) {
result_ += "[";
if (F->getType()->isPointerType())
result_ += "\"unsigned long long\"";
result_ += "\"" + F->getName().str() + "\", \"unsigned long long\"";
else
TraverseDecl(F);
if (const ConstantArrayType *T = dyn_cast<ConstantArrayType>(F->getType()))
result_ += ", [" + T->getSize().toString(10, false) + "]";
if (F->isBitField())
result_ += ", " + to_string(F->getBitWidthValue(C));
result_ += "], ";
@@ -158,6 +160,23 @@ bool ProbeVisitor::VisitBinaryOperator(BinaryOperator *E) {
}
return true;
}
bool ProbeVisitor::VisitUnaryOperator(UnaryOperator *E) {
if (E->getOpcode() != UO_Deref)
return true;
if (memb_visited_.find(E) != memb_visited_.end())
return true;
if (!ProbeChecker(E, ptregs_).needs_probe())
return true;
memb_visited_.insert(E);
Expr *sub = E->getSubExpr();
string rhs = rewriter_.getRewrittenText(SourceRange(sub->getLocStart(), sub->getLocEnd()));
string text;
text = "({ typeof(" + E->getType().getAsString() + ") _val; memset(&_val, 0, sizeof(_val));";
text += " bpf_probe_read(&_val, sizeof(_val), (u64)";
text += rhs + "); _val; })";
rewriter_.ReplaceText(SourceRange(E->getLocStart(), E->getLocEnd()), text);
return true;
}
bool ProbeVisitor::VisitMemberExpr(MemberExpr *E) {
if (memb_visited_.find(E) != memb_visited_.end()) return true;
@@ -194,7 +213,7 @@ bool ProbeVisitor::VisitMemberExpr(MemberExpr *E) {
}
BTypeVisitor::BTypeVisitor(ASTContext &C, Rewriter &rewriter, vector<TableDesc> &tables)
: C(C), rewriter_(rewriter), out_(llvm::errs()), tables_(tables) {
: C(C), diag_(C.getDiagnostics()), rewriter_(rewriter), out_(llvm::errs()), tables_(tables) {
}
bool BTypeVisitor::VisitFunctionDecl(FunctionDecl *D) {
@@ -276,7 +295,7 @@ bool BTypeVisitor::VisitCallExpr(CallExpr *Call) {
string map_update_policy = "BPF_ANY";
string txt;
if (memb_name == "lookup_or_init") {
string map_update_policy = "BPF_NOEXIST";
map_update_policy = "BPF_NOEXIST";
string name = Ref->getDecl()->getName();
string arg0 = rewriter_.getRewrittenText(SourceRange(Call->getArg(0)->getLocStart(),
Call->getArg(0)->getLocEnd()));
@@ -291,6 +310,19 @@ bool BTypeVisitor::VisitCallExpr(CallExpr *Call) {
txt += " if (!leaf) return 0;";
txt += "}";
txt += "leaf;})";
} else if (memb_name == "increment") {
string name = Ref->getDecl()->getName();
string arg0 = rewriter_.getRewrittenText(SourceRange(Call->getArg(0)->getLocStart(),
Call->getArg(0)->getLocEnd()));
string lookup = "bpf_map_lookup_elem_(bpf_pseudo_fd(1, " + fd + ")";
string update = "bpf_map_update_elem_(bpf_pseudo_fd(1, " + fd + ")";
txt = "({ typeof(" + name + ".key) _key = " + arg0 + "; ";
if (table_it->type == BPF_MAP_TYPE_HASH) {
txt += "typeof(" + name + ".leaf) _zleaf; memset(&_zleaf, 0, sizeof(_zleaf)); ";
txt += update + ", &_key, &_zleaf, BPF_NOEXIST); ";
}
txt += "typeof(" + name + ".leaf) *_leaf = " + lookup + ", &_key); ";
txt += "if (_leaf) (*_leaf)++; })";
} else {
if (memb_name == "lookup") {
prefix = "bpf_map_lookup_elem";
@@ -463,11 +495,21 @@ bool BTypeVisitor::VisitVarDecl(VarDecl *Decl) {
++i;
}
bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
if (A->getName() == "maps/hash")
if (A->getName() == "maps/hash") {
map_type = BPF_MAP_TYPE_HASH;
else if (A->getName() == "maps/array")
} else if (A->getName() == "maps/array") {
map_type = BPF_MAP_TYPE_ARRAY;
else if (A->getName() == "maps/prog") {
} else if (A->getName() == "maps/histogram") {
if (table.key_desc == "\"int\"")
map_type = BPF_MAP_TYPE_ARRAY;
else
map_type = BPF_MAP_TYPE_HASH;
if (table.leaf_desc != "\"unsigned long long\"") {
unsigned diag_id = diag_.getCustomDiagID(DiagnosticsEngine::Error,
"histogram leaf type must be u64, got %0");
diag_.Report(Decl->getLocStart(), diag_id) << table.leaf_desc;
}
} else if (A->getName() == "maps/prog") {
struct utsname un;
if (uname(&un) == 0) {
int major = 0, minor = 0;
@@ -485,8 +527,9 @@ bool BTypeVisitor::VisitVarDecl(VarDecl *Decl) {
table.type = map_type;
table.fd = bpf_create_map(map_type, table.key_size, table.leaf_size, table.max_entries);
if (table.fd < 0) {
C.getDiagnostics().Report(Decl->getLocStart(), diag::err_expected)
<< "valid bpf fd";
unsigned diag_id = C.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
"could not open bpf map: %0");
C.getDiagnostics().Report(Decl->getLocStart(), diag_id) << strerror(errno);
return false;
}
tables_.push_back(std::move(table));
@@ -71,6 +71,7 @@ class BTypeVisitor : public clang::RecursiveASTVisitor<BTypeVisitor> {
private:
clang::ASTContext &C;
clang::DiagnosticsEngine &diag_;
clang::Rewriter &rewriter_; /// modifications to the source go into this class
llvm::raw_ostream &out_; /// for debugging
std::vector<TableDesc> &tables_; /// store the open FDs
@@ -85,6 +86,7 @@ class ProbeVisitor : public clang::RecursiveASTVisitor<ProbeVisitor> {
bool VisitVarDecl(clang::VarDecl *Decl);
bool VisitCallExpr(clang::CallExpr *Call);
bool VisitBinaryOperator(clang::BinaryOperator *E);
bool VisitUnaryOperator(clang::UnaryOperator *E);
bool VisitMemberExpr(clang::MemberExpr *E);
void set_ptreg(clang::Decl *D) { ptregs_.insert(D); }
private:
This folder contains an implementation of a simple compiler that
translates programs written in a subset of P4 into C, which can in
turn be compiled into EBPF using the IOVisor bcc compiler.
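As a minimal sketch of driving the compiler from Python (the same
process() entry point the test harness uses; the file names here are
illustrative):

    import p4toEbpf
    # compile a P4 source file into a C file that bcc can consume
    result = p4toEbpf.process(["testprograms/simple.p4", "-o", "simple.c"])
    print(result)  # prints "Compilation successful" on success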
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
class CompilationException(Exception):
"""Signals an error during compilation"""
def __init__(self, isBug, format, *message):
# isBug: indicates that this is a compiler bug
super(CompilationException, self).__init__()
assert isinstance(format, str)
assert isinstance(isBug, bool)
self.message = message
self.format = format
self.isBug = isBug
def show(self):
# TODO: format this message nicely
return self.format.format(*self.message)
class NotSupportedException(Exception):
archError = " not supported by EBPF"
def __init__(self, format, *message):
super(NotSupportedException, self).__init__()
assert isinstance(format, str)
self.message = message
self.format = format
def show(self):
# TODO: format this message nicely
return (self.format + NotSupportedException.archError).format(
*self.message)
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import p4_conditional_node, p4_expression
from p4_hlir.hlir import p4_header_instance, p4_field
from programSerializer import ProgramSerializer
from compilationException import CompilationException
import ebpfProgram
import ebpfInstance
class EbpfConditional(object):
@staticmethod
def translate(op):
if op == "not":
return "!"
elif op == "or":
return "||"
elif op == "and":
return "&&"
return op
def __init__(self, p4conditional, program):
assert isinstance(p4conditional, p4_conditional_node)
assert isinstance(program, ebpfProgram.EbpfProgram)
self.hlirconditional = p4conditional
self.name = p4conditional.name
def emitNode(self, node, serializer, program):
if isinstance(node, p4_expression):
self.emitExpression(node, serializer, program, False)
elif node is None:
pass
elif isinstance(node, int):
serializer.append(node)
elif isinstance(node, p4_header_instance):
header = program.getInstance(node.name)
assert isinstance(header, ebpfInstance.EbpfHeader)
# TODO: stacks?
serializer.appendFormat(
"{0}.{1}", program.headerStructName, header.name)
elif isinstance(node, p4_field):
instance = node.instance
einstance = program.getInstance(instance.name)
if isinstance(einstance, ebpfInstance.EbpfHeader):
base = program.headerStructName
else:
base = program.metadataStructName
serializer.appendFormat(
"{0}.{1}.{2}", base, einstance.name, node.name)
else:
raise CompilationException(True, "{0} Unexpected expression ", node)
def emitExpression(self, expression, serializer, program, toplevel):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(program, ebpfProgram.EbpfProgram)
assert isinstance(expression, p4_expression)
assert isinstance(toplevel, bool)
left = expression.left
op = expression.op
right = expression.right
assert isinstance(op, str)
if op == "valid":
self.emitNode(right, serializer, program)
serializer.append(".valid")
return
if not toplevel:
serializer.append("(")
self.emitNode(left, serializer, program)
op = EbpfConditional.translate(op)
serializer.append(op)
self.emitNode(right, serializer, program)
if not toplevel:
serializer.append(")")
def generateCode(self, serializer, program, nextNode):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(program, ebpfProgram.EbpfProgram)
serializer.emitIndent()
serializer.blockStart()
trueBranch = self.hlirconditional.next_[True]
if trueBranch is None:
trueBranch = nextNode
falseBranch = self.hlirconditional.next_[False]
if falseBranch is None:
falseBranch = nextNode
serializer.emitIndent()
serializer.appendFormat("{0}:", program.getLabel(self.hlirconditional))
serializer.newline()
serializer.emitIndent()
serializer.append("if (")
self.emitExpression(
self.hlirconditional.condition, serializer, program, True)
serializer.appendLine(")")
serializer.increaseIndent()
label = program.getLabel(trueBranch)
serializer.emitIndent()
serializer.appendFormat("goto {0};", label)
serializer.newline()
serializer.decreaseIndent()
serializer.emitIndent()
serializer.appendLine("else")
serializer.increaseIndent()
label = program.getLabel(falseBranch)
serializer.emitIndent()
serializer.appendFormat("goto {0};", label)
serializer.newline()
serializer.decreaseIndent()
serializer.blockEnd(True)
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import p4_counter, P4_DIRECT, P4_COUNTER_BYTES
from programSerializer import ProgramSerializer
from compilationException import *
import ebpfTable
import ebpfProgram
class EbpfCounter(object):
# noinspection PyUnresolvedReferences
def __init__(self, hlircounter, program):
assert isinstance(hlircounter, p4_counter)
assert isinstance(program, ebpfProgram.EbpfProgram)
self.name = hlircounter.name
self.hlircounter = hlircounter
width = hlircounter.min_width
# ebpf counters are stored as 64-bit values
if width <= 64:
self.valueTypeName = program.config.uprefix + "64"
else:
raise NotSupportedException(
"{0}: Counters with {1} bits", hlircounter, width)
self.dataMapName = self.name
if ((hlircounter.binding is None) or
(hlircounter.binding[0] != P4_DIRECT)):
raise NotSupportedException(
"{0}: counter which is not direct", hlircounter)
self.autoIncrement = (hlircounter.binding != None and
hlircounter.binding[0] == P4_DIRECT)
if hlircounter.type is P4_COUNTER_BYTES:
self.increment = "{0}->len".format(program.packetName)
else:
self.increment = "1"
def getSize(self, program):
if self.hlircounter.instance_count is not None:
return self.hlircounter.instance_count
if self.autoIncrement:
return self.getTable(program).size
program.emitWarning(
"{0} does not specify a max_size; using 1024", self.hlircounter)
return 1024
def getTable(self, program):
table = program.getTable(self.hlircounter.binding[1].name)
assert isinstance(table, ebpfTable.EbpfTable)
return table
def serialize(self, serializer, program):
assert isinstance(serializer, ProgramSerializer)
# Direct counters have the same key as the associated table
# Static counters have integer keys
if self.autoIncrement:
keyTypeName = "struct " + self.getTable(program).keyTypeName
else:
keyTypeName = program.config.uprefix + "32"
program.config.serializeTableDeclaration(
serializer, self.dataMapName, True, keyTypeName,
self.valueTypeName, self.getSize(program))
def serializeCode(self, keyname, serializer, program):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(program, ebpfProgram.EbpfProgram)
serializer.emitIndent()
serializer.appendFormat("/* Update counter {0} */", self.name)
serializer.newline()
valueName = "ctrvalue"
initValuename = "init_val"
serializer.emitIndent()
serializer.appendFormat("{0} *{1};", self.valueTypeName, valueName)
serializer.newline()
serializer.emitIndent()
serializer.appendFormat("{0} {1};", self.valueTypeName, initValuename)
serializer.newline()
serializer.emitIndent()
serializer.appendLine("/* perform lookup */")
serializer.emitIndent()
program.config.serializeLookup(
serializer, self.dataMapName, keyname, valueName)
serializer.newline()
serializer.emitIndent()
serializer.appendFormat("if ({0} != NULL) ", valueName)
serializer.newline()
serializer.increaseIndent()
serializer.emitIndent()
serializer.appendFormat("__sync_fetch_and_add({0}, {1});",
valueName, self.increment)
serializer.newline()
serializer.decreaseIndent()
serializer.emitIndent()
serializer.append("else ")
serializer.blockStart()
serializer.emitIndent()
serializer.appendFormat("{0} = {1};", initValuename, self.increment)
serializer.newline()
serializer.emitIndent()
program.config.serializeUpdate(
serializer, self.dataMapName, keyname, initValuename)
serializer.newline()
serializer.blockEnd(True)
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import p4_header_instance
from ebpfType import EbpfType
from compilationException import CompilationException
from programSerializer import ProgramSerializer
import typeFactory
class EbpfInstanceBase(object):
def __init__(self):
pass
class SimpleInstance(EbpfInstanceBase):
# A header or a metadata instance (but not array elements)
def __init__(self, hlirInstance, factory, isMetadata):
super(SimpleInstance, self).__init__()
self.hlirInstance = hlirInstance
self.name = hlirInstance.base_name
self.type = factory.build(hlirInstance.header_type, isMetadata)
def declare(self, serializer):
assert isinstance(serializer, ProgramSerializer)
self.type.declare(serializer, self.name, False)
class EbpfHeader(SimpleInstance):
""" Represents a header instance from a P4 program """
def __init__(self, hlirHeaderInstance, factory):
super(EbpfHeader, self).__init__(hlirHeaderInstance, factory, False)
if hlirHeaderInstance.metadata:
raise CompilationException(True, "Metadata passed to EpbfHeader")
if hlirHeaderInstance.index is not None:
self.name += "_" + str(hlirHeaderInstance.index)
class EbpfMetadata(SimpleInstance):
"""Represents a metadata instance from a P4 program"""
def __init__(self, hlirMetadataInstance, factory):
super(EbpfMetadata, self).__init__(hlirMetadataInstance, factory, True)
if not hlirMetadataInstance.metadata:
raise CompilationException(
True, "Header instance passed to EpbfMetadata {0}",
hlirMetadataInstance)
if hlirMetadataInstance.index is not None:
raise CompilationException(
True, "Unexpected metadata array {0}", self.hlirInstance)
if hasattr(hlirMetadataInstance, "initializer"):
self.initializer = hlirMetadataInstance.initializer
else:
self.initializer = None
def emitInitializer(self, serializer):
assert isinstance(serializer, ProgramSerializer)
if self.initializer is None:
self.type.emitInitializer(serializer)
else:
for key in self.initializer.keys():
serializer.appendFormat(
".{0} = {1},", key, self.initializer[key])
class EbpfHeaderStack(EbpfInstanceBase):
"""Represents a header stack instance; there is one instance of
this class for each STACK, and not for each
element of the stack, as in the HLIR"""
def __init__(self, hlirInstance, indexVar, factory):
super(EbpfHeaderStack, self).__init__()
# indexVar: name of the ebpf variable that
# holds the current index for this stack
assert isinstance(indexVar, str)
assert isinstance(factory, typeFactory.EbpfTypeFactory)
assert isinstance(hlirInstance, p4_header_instance)
self.indexVar = indexVar
self.name = hlirInstance.base_name
self.basetype = factory.build(hlirInstance.header_type, False)
assert isinstance(self.basetype, EbpfType)
self.arraySize = hlirInstance.max_index + 1
self.hlirInstance = hlirInstance
def declare(self, serializer):
assert isinstance(serializer, ProgramSerializer)
self.basetype.declareArray(serializer, self.name, self.arraySize)
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import P4_AUTO_WIDTH
from ebpfType import *
from compilationException import *
from programSerializer import ProgramSerializer
class EbpfScalarType(EbpfType):
__doc__ = "Represents a scalar type"
def __init__(self, parent, widthInBits, isSigned, config):
super(EbpfScalarType, self).__init__(None)
assert isinstance(widthInBits, int)
assert isinstance(isSigned, bool)
self.width = widthInBits
self.isSigned = isSigned
self.config = config
if widthInBits is P4_AUTO_WIDTH:
raise NotSupportedException("{0} Variable-width field", parent)
def widthInBits(self):
return self.width
@staticmethod
def bytesRequired(width):
return (width + 7) / 8
def asString(self):
if self.isSigned:
prefix = self.config.iprefix
else:
prefix = self.config.uprefix
if self.width <= 8:
name = prefix + "8"
elif self.width <= 16:
name = prefix + "16"
elif self.width <= 32:
name = prefix + "32"
else:
name = "char*"
return name
def alignment(self):
if self.width <= 8:
return 1
elif self.width <= 16:
return 2
elif self.width <= 32:
return 4
else:
return 1 # Char array
def serialize(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.append(self.asString())
def declareArray(self, serializer, identifier, size):
raise CompilationException(
True, "Arrays of base type not expected in P4")
def declare(self, serializer, identifier, asPointer):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(asPointer, bool)
assert isinstance(identifier, str)
if self.width <= 32:
self.serialize(serializer)
if asPointer:
serializer.append("*")
serializer.space()
serializer.append(identifier)
else:
if asPointer:
serializer.append("char*")
else:
serializer.appendFormat(
"char {0}[{1}]", identifier,
EbpfScalarType.bytesRequired(self.width))
def emitInitializer(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.append("0")
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import P4_SIGNED, P4_SATURATING
from ebpfScalarType import *
class EbpfField(object):
__doc__ = "represents a field in a struct type, not in an instance"
def __init__(self, hlirParentType, name, widthInBits, attributes, config):
self.name = name
self.width = widthInBits
self.hlirType = hlirParentType
signed = False
if P4_SIGNED in attributes:
signed = True
if P4_SATURATING in attributes:
raise NotSupportedException(
"{0}.{1}: Saturated types", self.hlirType, self.name)
try:
self.type = EbpfScalarType(
self.hlirType, widthInBits, signed, config)
except CompilationException, e:
raise CompilationException(
e.isBug, "{0}.{1}: {2}", hlirParentType, self.name, e.show())
def widthInBits(self):
return self.width
class EbpfStructType(EbpfType):
# Abstract base class for HeaderType and MetadataType.
# They are both represented by a p4 header_type
def __init__(self, hlirHeader, config):
super(EbpfStructType, self).__init__(hlirHeader)
self.name = hlirHeader.name
self.fields = []
for (fieldName, fieldSize) in self.hlirType.layout.items():
attributes = self.hlirType.attributes[fieldName]
field = EbpfField(
hlirHeader, fieldName, fieldSize, attributes, config)
self.fields.append(field)
def serialize(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.emitIndent()
serializer.appendFormat("struct {0} ", self.name)
serializer.blockStart()
for field in self.fields:
serializer.emitIndent()
field.type.declare(serializer, field.name, False)
serializer.appendFormat("; /* {0} bits */", field.widthInBits())
serializer.newline()
serializer.blockEnd(False)
serializer.endOfStatement(True)
def declare(self, serializer, identifier, asPointer):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(identifier, str)
assert isinstance(asPointer, bool)
serializer.appendFormat("struct {0} ", self.name)
if asPointer:
serializer.append("*")
serializer.append(identifier)
def widthInBits(self):
return self.hlirType.length * 8
def getField(self, name):
assert isinstance(name, str)
for f in self.fields:
assert isinstance(f, EbpfField)
if f.name == name:
return f
raise CompilationException(
True, "Could not locate field {0}.{1}", self, name)
class EbpfHeaderType(EbpfStructType):
def __init__(self, hlirHeader, config):
super(EbpfHeaderType, self).__init__(hlirHeader, config)
validField = EbpfField(hlirHeader, "valid", 1, set(), config)
# check that no "valid" field exists already
for f in self.fields:
if f.name == "valid":
raise CompilationException(
True,
"Header type contains a field named `valid': {0}",
f)
self.fields.append(validField)
def emitInitializer(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.blockStart()
serializer.emitIndent()
serializer.appendLine(".valid = 0")
serializer.blockEnd(False)
def declareArray(self, serializer, identifier, size):
assert isinstance(serializer, ProgramSerializer)
serializer.appendFormat(
"struct {0} {1}[{2}]", self.name, identifier, size)
class EbpfMetadataType(EbpfStructType):
def __init__(self, hlirHeader, config):
super(EbpfMetadataType, self).__init__(hlirHeader, config)
def emitInitializer(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.blockStart()
for field in self.fields:
serializer.emitIndent()
serializer.appendFormat(".{0} = ", field.name)
field.type.emitInitializer(serializer)
serializer.append(",")
serializer.newline()
serializer.blockEnd(False)
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from compilationException import CompilationException
class EbpfType(object):
__doc__ = "Base class for representing a P4 type"
def __init__(self, hlirType):
self.hlirType = hlirType
# Methods to override
def serialize(self, serializer):
# the type itself
raise CompilationException(True, "Method must be overridden")
def declare(self, serializer, identifier, asPointer):
# declaration of an identifier with this type
# asPointer is a boolean;
# if true, the identifier is declared as a pointer
raise CompilationException(True, "Method must be overridden")
def emitInitializer(self, serializer):
# A default initializer suitable for this type
raise CompilationException(True, "Method must be overridden")
def declareArray(self, serializer, identifier, size):
# Declare an identifier with an array type with the specified size
raise CompilationException(True, "Method must be overridden")
#!/usr/bin/env python
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# Compiler from P4 to EBPF
# (See http://www.slideshare.net/PLUMgrid/ebpf-and-linux-networking).
# This compiler in fact generates a C source file
# which can be compiled to EBPF using the LLVM compiler
# with the ebpf target.
#
# Main entry point.
import argparse
import os
import traceback
import sys
import target
from p4_hlir.main import HLIR
from ebpfProgram import EbpfProgram
from compilationException import *
from programSerializer import ProgramSerializer
def get_parser():
parser = argparse.ArgumentParser(description='p4toEbpf arguments')
parser.add_argument('source', metavar='source', type=str,
help='a P4 source file to compile')
parser.add_argument('-g', dest='generated', default="router",
help="kind of output produced: filter or router")
parser.add_argument('-o', dest='output_file', default="output.c",
help="generated C file name")
return parser
def process(input_args):
parser = get_parser()
args, unparsed_args = parser.parse_known_args(input_args)
has_remaining_args = False
preprocessor_args = []
for a in unparsed_args:
if a[:2] == "-D" or a[:2] == "-I" or a[:2] == "-U":
input_args.remove(a)
preprocessor_args.append(a)
else:
has_remaining_args = True
# trigger error
if has_remaining_args:
parser.parse_args(input_args)
if args.generated == "router":
isRouter = True
elif args.generated == "filter":
isRouter = False
else:
print("-g should be one of 'filter' or 'router'")
return CompileResult("invalid option", "-g should be one of 'filter' or 'router'")
print("*** Compiling ", args.source)
return compileP4(args.source, args.output_file, isRouter, preprocessor_args)
class CompileResult(object):
def __init__(self, kind, error):
self.kind = kind
self.error = error
def __str__(self):
if self.kind == "OK":
return "Compilation successful"
else:
return "Compilation failed with error: " + self.error
def compileP4(inputFile, gen_file, isRouter, preprocessor_args):
h = HLIR(inputFile)
for parg in preprocessor_args:
h.add_preprocessor_args(parg)
if not h.build():
return CompileResult("HLIR", "Error while building HLIR")
try:
basename = os.path.basename(inputFile)
basename = os.path.splitext(basename)[0]
config = target.BccConfig()
e = EbpfProgram(basename, h, isRouter, config)
serializer = ProgramSerializer()
e.toC(serializer)
f = open(gen_file, 'w')
f.write(serializer.toString())
f.close()
return CompileResult("OK", "")
except CompilationException, e:
prefix = ""
if e.isBug:
prefix = "### Compiler bug: "
return CompileResult("bug", prefix + e.show())
except NotSupportedException, e:
return CompileResult("not supported", e.show())
except:
return CompileResult("exception", traceback.format_exc())
# main entry point
if __name__ == "__main__":
result = process(sys.argv[1:])
if result.kind != "OK":
print(str(result))
#!/usr/bin/env python
# helper for building C program source text
from compilationException import *
class ProgramSerializer(object):
def __init__(self):
self.program = ""
self.eol = "\n"
self.currentIndent = 0
self.INDENT_AMOUNT = 4 # default indent amount
def __str__(self):
return self.program
def increaseIndent(self):
self.currentIndent += self.INDENT_AMOUNT
def decreaseIndent(self):
self.currentIndent -= self.INDENT_AMOUNT
if self.currentIndent < 0:
raise CompilationException(True, "Negative indentation level")
def toString(self):
return self.program
def space(self):
self.append(" ")
def newline(self):
self.program += self.eol
def endOfStatement(self, addNewline):
self.append(";")
if addNewline:
self.newline()
def append(self, string):
self.program += str(string)
def appendFormat(self, format, *args):
string = format.format(*args)
self.append(string)
def appendLine(self, string):
self.append(string)
self.newline()
def emitIndent(self):
self.program += " " * self.currentIndent
def blockStart(self):
self.append("{")
self.newline()
self.increaseIndent()
def blockEnd(self, addNewline):
self.decreaseIndent()
self.emitIndent()
self.append("}")
if addNewline:
self.newline()
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from programSerializer import ProgramSerializer
# abstraction for isolating target-specific features
# Base class for representing target-specific configuration
class TargetConfig(object):
def __init__(self, target):
self.targetName = target
def getIncludes(self):
return ""
def serializeLookup(self, serializer, tableName, key, value):
serializer.appendFormat("{0} = bpf_map_lookup_elem(&{1}, &{2});",
value, tableName, key)
def serializeUpdate(self, serializer, tableName, key, value):
serializer.appendFormat(
"bpf_map_update_elem(&{0}, &{1}, &{2}, BPF_ANY);",
tableName, key, value)
def serializeLicense(self, serializer, licenseString):
assert isinstance(serializer, ProgramSerializer)
serializer.emitIndent()
serializer.appendFormat(
"char _license[] {0}(\"license\") = \"{1}\";",
self.section, licenseString)
serializer.newline()
def serializeCodeSection(self, serializer):
assert isinstance(serializer, ProgramSerializer)
serializer.appendFormat("{0}(\"{1}\")", self.section, self.entrySection)
def serializeTableDeclaration(self, serializer, tableName,
isHash, keyType, valueType, size):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(tableName, str)
assert isinstance(isHash, bool)
assert isinstance(keyType, str)
assert isinstance(valueType, str)
assert isinstance(size, int)
serializer.emitIndent()
serializer.appendFormat("struct {0} {1}(\"maps\") {2} = ",
self.tableName, self.section, tableName)
serializer.blockStart()
serializer.emitIndent()
serializer.append(".type = ")
if isHash:
serializer.appendLine("BPF_MAP_TYPE_HASH,")
else:
serializer.appendLine("BPF_MAP_TYPE_ARRAY,")
serializer.emitIndent()
serializer.appendFormat(".{0} = sizeof(struct {1}), ",
self.tableKeyAttribute, keyType)
serializer.newline()
serializer.emitIndent()
serializer.appendFormat(".{0} = sizeof(struct {1}), ",
self.tableValueAttribute, valueType)
serializer.newline()
serializer.emitIndent()
serializer.appendFormat(".{0} = {1}, ", self.tableSizeAttribute, size)
serializer.newline()
serializer.blockEnd(False)
serializer.endOfStatement(True)
def generateDword(self, serializer):
serializer.appendFormat(
"static inline {0}64 load_dword(void *skb, {0}64 off)",
self.uprefix)
serializer.newline()
serializer.blockStart()
serializer.emitIndent()
serializer.appendFormat(
("return (({0}64)load_word(skb, off) << 32) | " +
"load_word(skb, off + 4);"),
self.uprefix)
serializer.newline()
serializer.blockEnd(True)
# Represents a target that is compiled within the kernel
# source tree samples folder and which attaches to a socket
class KernelSamplesConfig(TargetConfig):
def __init__(self):
super(KernelSamplesConfig, self).__init__("Socket")
self.entrySection = "socket1"
self.section = "SEC"
self.uprefix = "u"
self.iprefix = "i"
self.tableKeyAttribute = "key_size"
self.tableValueAttribute = "value_size"
self.tableSizeAttribute = "max_entries"
self.tableName = "bpf_map_def"
self.postamble = ""
def getIncludes(self):
return """
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "bpf_helpers.h"
"""
# Represents a target compiled by bcc that uses the TC
class BccConfig(TargetConfig):
def __init__(self):
super(BccConfig, self).__init__("BCC")
self.uprefix = "u"
self.iprefix = "i"
self.postamble = ""
def serializeTableDeclaration(self, serializer, tableName,
isHash, keyType, valueType, size):
assert isinstance(serializer, ProgramSerializer)
assert isinstance(tableName, str)
assert isinstance(isHash, bool)
assert isinstance(keyType, str)
assert isinstance(valueType, str)
assert isinstance(size, int)
serializer.emitIndent()
if isHash:
kind = "hash"
else:
kind = "array"
serializer.appendFormat(
"BPF_TABLE(\"{0}\", {1}, {2}, {3}, {4});",
kind, keyType, valueType, tableName, size)
serializer.newline()
def serializeLookup(self, serializer, tableName, key, value):
serializer.appendFormat("{0} = {1}.lookup(&{2});",
value, tableName, key)
def serializeUpdate(self, serializer, tableName, key, value):
serializer.appendFormat("{0}.update(&{1}, &{2});",
tableName, key, value)
def generateDword(self, serializer):
pass
def serializeCodeSection(self, serializer):
pass
def getIncludes(self):
return """
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
"""
def serializeLicense(self, serializer, licenseString):
assert isinstance(serializer, ProgramSerializer)
pass
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from p4_hlir.hlir import p4_header
from ebpfStructType import *
class EbpfTypeFactory(object):
def __init__(self, config):
self.type_map = {}
self.config = config
def build(self, hlirType, asMetadata):
name = hlirType.name
if hlirType.name in self.type_map:
retval = self.type_map[name]
if ((not asMetadata and isinstance(retval, EbpfMetadataType)) or
(asMetadata and isinstance(retval, EbpfHeaderType))):
raise CompilationException(
True, "Same type used both as a header and metadata {0}",
hlirType)
if isinstance(hlirType, p4_header):
if asMetadata:
type = EbpfMetadataType(hlirType, self.config)
else:
type = EbpfHeaderType(hlirType, self.config)
else:
raise CompilationException(True, "Unexpected type {0}", hlirType)
self.registerType(name, type)
return type
def registerType(self, name, ebpfType):
self.type_map[name] = ebpfType
This folder contains tests for the P4->C->EBPF compiler
- cleanup.sh should be run if for some reason endToEndTest.py crashes
and leaves garbage namespaces or links
- testP4toEbpf.py compiles all P4 files in the testprograms folder and
deposits the corresponding C files in the testoutputs folder (a minimal
compile-and-load sketch follows this list)
- endToEndTest.py runs a complete end-to-end test compiling the
testprograms/simple.p4 program, creating a virtual network with 3
boxes (using network namespaces): client, server, switch, loading
the EBPF into the kernel of the switch box using the TC, and
implementing the forwarding in the switch solely using the P4
program.
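A minimal compile-and-load sketch in the spirit of testP4toEbpf.py
(file names are illustrative; ebpf_filter is the function that script
loads as a TC classifier):

    from bcc import BPF
    import p4toEbpf
    # compile P4 to C, then load the generated program with bcc
    p4toEbpf.process(["testprograms/simple.p4", "-o", "testoutputs/simple.c"])
    b = BPF(src_file="testoutputs/simple.c", debug=0)
    fn = b.load_func("ebpf_filter", BPF.SCHED_CLS)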
#!/bin/bash
# Run this script if for some reason the endToEndTest.py crashed
# and left some garbage state
ip netns del sw
ip netns del srv
ip netns del clt
ip link del dev veth-clt-sw
ip link del dev veth-srv-sw
#!/usr/bin/env python
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# Runs the compiler on all files in the 'testprograms' folder
# Writes outputs in the 'testoutputs' folder
from bcc import BPF
import os, sys
sys.path.append("../compiler") # To get hold of p4toEbpf
# We want to run it without installing it
import p4toEbpf
def drop_extension(filename):
return os.path.splitext(os.path.basename(filename))[0]
filesFailed = {} # map error kind -> list[ (file, error) ]
def set_error(kind, file, error):
if kind in filesFailed:
filesFailed[kind].append((file, error))
else:
filesFailed[kind] = [(file, error)]
def is_root():
# Is this code portable?
return os.getuid() == 0
def main():
testpath = "testprograms"
destFolder = "testoutputs"
files = os.listdir(testpath)
files.sort()
filesDone = 0
errors = 0
if not is_root():
print "Loading EBPF programs requires root priviledge."
print "Will only test compilation, not loading."
print "(Run with sudo to test program loading.)"
for f in files:
path = os.path.join(testpath, f)
if not os.path.isfile(path):
continue
if not path.endswith(".p4"):
continue
destname = drop_extension(path) + ".c"
destname = os.path.join(destFolder, destname)
args = [path, "-o", destname]
result = p4toEbpf.process(args)
if result.kind != "OK":
errors += 1
print path, result.error
set_error(result.kind, path, result.error)
else:
# Try to load the compiled function
if is_root():
try:
print("Compiling and loading BPF program")
b = BPF(src_file=destname, debug=0)
fn = b.load_func("ebpf_filter", BPF.SCHED_CLS)
except Exception as e:
print(e)
set_error("BPF error", path, str(e))
filesDone += 1
print "Compiled", filesDone, "files", errors, "errors"
for key in sorted(filesFailed):
print key, ":", len(filesFailed[key]), "programs"
for v in filesFailed[key]:
print "\t", v
exit(len(filesFailed) != 0)
if __name__ == "__main__":
main()
header_type ethernet_t {
fields {
dstAddr : 48;
srcAddr : 48;
etherType : 16;
}
}
parser start {
return parse_ethernet;
}
header ethernet_t ethernet;
parser parse_ethernet {
extract(ethernet);
return ingress;
}
action nop()
{}
table routing {
reads {
ethernet.dstAddr: exact;
}
actions { nop; }
size : 512;
}
control ingress
{
apply(routing);
}
\ No newline at end of file
header_type ht
{
fields
{
f1 : 1;
f2 : 2;
f3 : 3;
f4 : 4;
f5 : 5;
f6 : 6;
f7 : 7;
f8 : 8;
f9 : 9;
f10 : 10;
f11 : 11;
f12 : 12;
f13 : 13;
f14 : 14;
f15 : 15;
f16 : 16;
f17 : 17;
f18 : 18;
f19 : 19;
f20 : 20;
f21 : 21;
f22 : 22;
f23 : 23;
f24 : 24;
f25 : 25;
f26 : 26;
f27 : 27;
f28 : 28;
f29 : 29;
f30 : 30;
f31 : 31;
f32 : 32;
}
}
header_type larget
{
fields
{
f48 : 48;
f1: 1;
f49 : 48;
f2 : 1;
f64 : 64;
f3 : 1;
f128 : 128;
}
}
header ht h;
header larget large;
parser start
{
extract(h);
extract(large);
return ingress;
}
control ingress
{
}
header_type ethernet_t {
fields {
dstAddr : 48;
}
}
header_type ipv4_t {
fields {
srcAddr : 32;
}
}
parser start {
return parse_ethernet;
}
header ethernet_t ethernet;
parser parse_ethernet {
extract(ethernet);
return parse_ipv4;
}
action nop()
{}
header ipv4_t ipv4;
parser parse_ipv4 {
extract(ipv4);
return ingress;
}
table routing {
reads {
ethernet.dstAddr: exact;
ipv4.srcAddr: exact;
}
actions { nop; }
size : 512;
}
control ingress
{
apply(routing);
}
\ No newline at end of file
header_type ethernet_t {
fields {
dstAddr : 48;
srcAddr : 48;
etherType : 16;
}
}
header_type ipv4_t {
fields {
version : 4;
ihl : 4;
diffserv : 8;
totalLen : 16;
identification : 16;
flags : 3;
fragOffset : 13;
ttl : 8;
protocol : 8;
hdrChecksum : 16;
srcAddr : 32;
dstAddr: 32;
}
}
parser start {
return parse_ethernet;
}
header ethernet_t ethernet;
parser parse_ethernet {
extract(ethernet);
return select(latest.etherType) {
0x800 : parse_ipv4;
default: ingress;
}
}
action nop()
{}
action forward(port)
{
modify_field(standard_metadata.egress_port, port);
}
header ipv4_t ipv4;
parser parse_ipv4 {
extract(ipv4);
return ingress;
}
table routing {
reads {
ipv4.dstAddr: exact;
ipv4.srcAddr: exact;
}
actions { nop; forward; }
size : 512;
}
counter cnt {
type: bytes;
direct: routing;
}
control ingress
{
apply(routing);
}
\ No newline at end of file
/* Sample P4 program */
header_type ethernet_t {
fields {
dstAddr : 48;
srcAddr : 48;
etherType : 16;
}
}
parser start {
return parse_ethernet;
}
header ethernet_t ethernet;
parser parse_ethernet {
extract(ethernet);
return ingress;
}
action action_0(){
no_op();
}
table table_0 {
reads {
ethernet.etherType : exact;
}
actions {
action_0;
}
}
control ingress {
apply(table_0);
}
// Routes a packet to an interface based on its IPv4 address
// Maintains a set of counters on the routing table
header_type ethernet_t {
fields {
dstAddr : 48;
srcAddr : 48;
etherType : 16;
}
}
header_type ipv4_t {
fields {
version : 4;
ihl : 4;
diffserv : 8;
totalLen : 16;
identification : 16;
flags : 3;
fragOffset : 13;
ttl : 8;
protocol : 8;
hdrChecksum : 16;
srcAddr : 32;
dstAddr: 32;
}
}
parser start {
return parse_ethernet;
}
header ethernet_t ethernet;
parser parse_ethernet {
extract(ethernet);
return select(latest.etherType) {
0x800 : parse_ipv4;
default: ingress;
}
}
action nop()
{}
action forward(port)
{
modify_field(standard_metadata.egress_port, port);
}
header ipv4_t ipv4;
parser parse_ipv4 {
extract(ipv4);
return ingress;
}
table routing {
reads {
ipv4.dstAddr: exact;
}
actions { nop; forward; }
size : 512;
}
counter cnt {
type: bytes;
direct: routing;
}
control ingress
{
apply(routing);
}
\ No newline at end of file
@@ -99,7 +99,7 @@ KALLSYMS = "/proc/kallsyms"
ksym_addrs = []
ksym_names = []
ksym_loaded = 0
stars_max = 38
stars_max = 40
@atexit.register
def cleanup_kprobes():
@@ -238,38 +238,60 @@ class BPF(object):
text = text[:-1] + "+"
return text
def print_log2_hist(self, val_type="value"):
"""print_log2_hist(type=value)
def print_log2_hist(self, val_type="value", bucket_type="ptr"):
"""print_log2_hist(val_type="value", bucket_type="ptr")
Prints a table as a log2 histogram. The table must be stored as
log2. The type argument is optional, and is a column header.
log2. The val_type argument is optional, and is a column header.
If the histogram has a secondary key, multiple tables will print
and bucket_type can be used as a header description for each.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
vals = tmp[bucket] = tmp.get(bucket, [0] * 65)
slot = getattr(k, f2)
vals[slot] = v.value
for bucket, vals in tmp.items():
print("\nBucket %s = %r" % (bucket_type, bucket))
self._print_log2_hist(vals, val_type, 0)
else:
vals = [0] * 65
for k, v in self.items():
vals[k.value] = v.value
self._print_log2_hist(vals, val_type, 0)
def _print_log2_hist(self, vals, val_type, val_max):
global stars_max
log2_dist_max = 64
idx_max = -1
val_max = 0
for i in range(1, log2_dist_max + 1):
try:
val = self[ct.c_int(i)].value
if (val > 0):
idx_max = i
if (val > val_max):
val_max = val
except:
break
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
if idx_max <= 32:
header = " %-19s : count distribution"
body = "%10d -> %-10d : %-8d |%-*s|"
stars = stars_max
else:
header = " %-29s : count distribution"
body = "%20d -> %-20d : %-8d |%-*s|"
stars = int(stars_max / 2)
if idx_max > 0:
print(" %-15s : count distribution" % val_type);
print(header % val_type);
for i in range(1, idx_max + 1):
low = (1 << i) >> 1
high = (1 << i) - 1
if (low == high):
low -= 1
try:
val = self[ct.c_int(i)].value
print("%8d -> %-8d : %-8d |%-*s|" % (low, high, val,
stars_max, self._stars(val, val_max, stars_max)))
except:
break
val = vals[i]
print(body % (low, high, val, stars,
self._stars(val, val_max, stars)))
def __iter__(self):
@@ -407,7 +429,6 @@ class BPF(object):
u"_Bool": ct.c_bool,
u"char": ct.c_char,
u"wchar_t": ct.c_wchar,
u"char": ct.c_byte,
u"unsigned char": ct.c_ubyte,
u"short": ct.c_short,
u"unsigned short": ct.c_ushort,
@@ -430,7 +451,12 @@ class BPF(object):
if len(t) == 2:
fields.append((t[0], BPF._decode_table_type(t[1])))
elif len(t) == 3:
fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
if isinstance(t[2], list):
fields.append((t[0], BPF._decode_table_type(t[1]) * t[2][0]))
else:
fields.append((t[0], BPF._decode_table_type(t[1]), t[2]))
else:
raise Exception("Failed to decode type %s" % str(t))
cls = type(str(desc[0]), (ct.Structure,), dict(_fields_=fields))
return cls
@@ -40,3 +40,5 @@ add_test(NAME py_test_brb2 WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_brb2_c sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_brb2.py test_brb2.c)
add_test(NAME py_test_clang WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_clang sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_clang.py)
add_test(NAME py_test_histogram WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND ${TEST_WRAPPER} py_histogram sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_histogram.py)
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
from ctypes import c_int, c_ulonglong
import random
import time
from unittest import main, TestCase
class TestHistogram(TestCase):
def test_simple(self):
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/bpf.h>
BPF_HISTOGRAM(hist1);
BPF_HASH(stub);
int kprobe__htab_map_delete_elem(struct pt_regs *ctx, struct bpf_map *map, u64 *k) {
hist1.increment(bpf_log2l(*k));
return 0;
}
""")
for i in range(0, 32):
for j in range(0, random.randint(1, 10)):
try: del b["stub"][c_ulonglong(1 << i)]
except: pass
b["hist1"].print_log2_hist()
for i in range(32, 64):
for j in range(0, random.randint(1, 10)):
try: del b["stub"][c_ulonglong(1 << i)]
except: pass
b["hist1"].print_log2_hist()
def test_struct(self):
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/bpf.h>
typedef struct { void *map; u64 slot; } Key;
BPF_HISTOGRAM(hist1, Key, 1024);
BPF_HASH(stub1);
BPF_HASH(stub2);
int kprobe__htab_map_delete_elem(struct pt_regs *ctx, struct bpf_map *map, u64 *k) {
hist1.increment((Key){map, bpf_log2l(*k)});
return 0;
}
""")
for i in range(0, 64):
for j in range(0, random.randint(1, 10)):
try: del b["stub1"][c_ulonglong(1 << i)]
except: pass
try: del b["stub2"][c_ulonglong(1 << i)]
except: pass
b["hist1"].print_log2_hist()
def test_chars(self):
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
typedef struct { char name[TASK_COMM_LEN]; u64 slot; } Key;
BPF_HISTOGRAM(hist1, Key, 1024);
int kprobe__finish_task_switch(struct pt_regs *ctx, struct task_struct *prev) {
Key k = {.slot = bpf_log2l(prev->real_start_time)};
if (!bpf_get_current_comm(&k.name, sizeof(k.name)))
hist1.increment(k);
return 0;
}
""")
for i in range(0, 100): time.sleep(0.01)
b["hist1"].print_log2_hist()
if __name__ == "__main__":
main()
@@ -45,7 +45,7 @@ bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
BPF_TABLE(\"array\", int, u64, dist, 64);
BPF_HISTOGRAM(dist);
BPF_HASH(start, struct request *);
// time block I/O
@@ -70,9 +70,7 @@ int trace_req_completion(struct pt_regs *ctx, struct request *req)
FACTOR
// store as histogram
int index = bpf_log2l(delta);
u64 *leaf = dist.lookup(&index);
if (leaf) (*leaf)++;
dist.increment(bpf_log2l(delta));
start.delete(&req);
return 0;