Commit 55d67114 authored by Brenden Blanco's avatar Brenden Blanco

added P4 deparser synthesis

parent 61fb04d3
......@@ -238,18 +238,12 @@ Here are some limitations imposed on the P4 programs:
* arithmetic on data wider than 32 bits is not supported
* mutating the network packet does not work. The P4 programs allow
users to express packet header mutations; however, the generated
code does not currently include packet reassembly (i.e., a P4
"deparser"), which would store the mutated headers back into the
network packet. Some of this functionality could be implemented in
the future, but currently the EBPF interfaces to the kernel do not
permit arbitrary packet mutations.
* checksum computations are not implemented. In consequence, programs
that use IP/TCP/UDP headers will produce incorrect packet headers.
* EBPF does not offer support for ternary or LPM tables
* cloning and recirculation are not supported, since the underlying
TC-based framework does not support some of this functionality.
* P4 cloning and recirculation are not supported
* meters and registers are not supported; only direct counters are
currently supported. EBPF can potentially support registers and
......
# Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from collections import defaultdict, OrderedDict
from p4_hlir.hlir import parse_call, p4_field, p4_parse_value_set, \
P4_DEFAULT, p4_parse_state, p4_table, \
p4_conditional_node, p4_parser_exception, \
p4_header_instance, P4_NEXT
import ebpfProgram
import ebpfInstance
import ebpfType
import ebpfStructType
from topoSorting import Graph
from programSerializer import ProgramSerializer
def produce_parser_topo_sorting(hlir):
    """Topologically sort the header instances reachable from the parser.

    Walks the parse graph starting at the "start" state and adds a graph
    edge between each pair of consecutively extracted headers, then
    topo-sorts the resulting header graph.  Returns the header instances
    in a valid wire order for the deparser, or None if the parse graph
    implies a cycle between headers.
    """
    # This function is copied from the P4 behavioral model implementation
    header_graph = Graph()

    def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index):
        # tag_stacks_index maps a header-stack base name to the next index
        # to extract; it is copied at each branch so every parse path
        # counts its own stack elements independently.
        assert(isinstance(parse_state, p4_parse_state))
        for call in parse_state.call_sequence:
            call_type = call[0]
            if call_type == parse_call.extract:
                hdr = call[1]
                if hdr.virtual:
                    # A "next" reference into a header stack: resolve it
                    # to the concrete indexed instance; stop this path if
                    # we have run past the end of the stack.
                    base_name = hdr.base_name
                    current_index = tag_stacks_index[base_name]
                    if current_index > hdr.max_index:
                        return
                    tag_stacks_index[base_name] += 1
                    name = base_name + "[%d]" % current_index
                    hdr = hlir.p4_header_instances[name]

                if hdr not in header_graph:
                    header_graph.add_node(hdr)
                hdr_node = header_graph.get_node(hdr)

                # Chain this header after the previously extracted one;
                # the very first extracted header becomes the graph root.
                if prev_hdr_node:
                    prev_hdr_node.add_edge_to(hdr_node)
                else:
                    header_graph.root = hdr
                prev_hdr_node = hdr_node

        # Recurse into every reachable parse state.
        for branch_case, next_state in parse_state.branch_to.items():
            if not next_state:
                continue
            if not isinstance(next_state, p4_parse_state):
                continue
            walk_rec(hlir, next_state, prev_hdr_node, tag_stacks_index.copy())

    start_state = hlir.p4_parse_states["start"]
    walk_rec(hlir, start_state, None, defaultdict(int))

    header_topo_sorting = header_graph.produce_topo_sorting()

    return header_topo_sorting
class EbpfDeparser(object):
    """Synthesizes the deparser portion of the generated EBPF program.

    The deparser writes the (possibly mutated) header fields from the
    parsed representation back into the network packet.  Headers are
    emitted in a topological order computed from the parse graph, so a
    header is always written before any header that can follow it on
    the wire.
    """

    def __init__(self, hlir):
        header_topo_sorting = produce_parser_topo_sorting(hlir)
        # Header instance names in the order they must be emitted.
        self.headerOrder = [hdr.name for hdr in header_topo_sorting]

    def serialize(self, serializer, program):
        """Emit the complete deparser block into `serializer`."""
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(program, ebpfProgram.EbpfProgram)
        serializer.emitIndent()
        serializer.blockStart()
        serializer.emitIndent()
        serializer.appendLine("/* Deparser */")
        serializer.emitIndent()
        # Reset the packet write offset; each emitted field advances it.
        serializer.appendFormat("{0} = 0;", program.offsetVariableName)
        serializer.newline()
        for h in self.headerOrder:
            header = program.getHeaderInstance(h)
            self.serializeHeaderEmit(header, serializer, program)
        serializer.blockEnd(True)

    def serializeHeaderEmit(self, header, serializer, program):
        """Emit code that writes one header instance back to the packet.

        The generated code is guarded by the header's `valid` bit, so
        headers that were never parsed (or were invalidated) are skipped.
        """
        assert isinstance(header, ebpfInstance.EbpfHeader)
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(program, ebpfProgram.EbpfProgram)
        p4header = header.hlirInstance
        assert isinstance(p4header, p4_header_instance)
        serializer.emitIndent()
        serializer.appendFormat("if ({0}.{1}.valid) ",
                                program.headerStructName, header.name)
        serializer.blockStart()
        if ebpfProgram.EbpfProgram.isArrayElementInstance(p4header):
            # Element of a header stack: the index is either a literal
            # int (e.g. vlan[2]) or P4_NEXT (the stack's runtime index).
            ebpfStack = program.getStackInstance(p4header.base_name)
            assert isinstance(ebpfStack, ebpfInstance.EbpfHeaderStack)
            if isinstance(p4header.index, int):
                # Fix: the original referenced the undefined name
                # 'headerInstance' here, which raised NameError for any
                # fixed-index stack element.
                index = "[" + str(p4header.index) + "]"
            elif p4header.index is P4_NEXT:
                index = "[" + ebpfStack.indexVar + "]"
            else:
                # NOTE(review): CompilationException is not imported by
                # this module; presumably it comes from
                # compilationException — confirm the import exists.
                raise CompilationException(
                    True, "Unexpected index for array {0}",
                    p4header.index)
            basetype = ebpfStack.basetype
        else:
            ebpfHeader = program.getHeaderInstance(p4header.name)
            basetype = ebpfHeader.type
            index = ""

        # Track each field's bit offset within its starting byte.
        alignment = 0
        for field in basetype.fields:
            assert isinstance(field, ebpfStructType.EbpfField)
            self.serializeFieldEmit(serializer, p4header.base_name,
                                    index, field, alignment, program)
            alignment += field.widthInBits()
            alignment = alignment % 8
        serializer.blockEnd(True)

    def serializeFieldEmit(self, serializer, name, index,
                           field, alignment, program):
        """Emit code storing one field into the packet, then advance the
        packet offset by the field's width.

        name:      header instance base name
        index:     "[i]" suffix for stack elements, "" otherwise
        alignment: bit offset of the field within its starting byte (0-7)
        """
        assert isinstance(index, str)
        assert isinstance(name, str)
        assert isinstance(field, ebpfStructType.EbpfField)
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(alignment, int)
        assert isinstance(program, ebpfProgram.EbpfProgram)

        if field.name == "valid":
            # The validity bit is bookkeeping only; it is not on the wire.
            return

        fieldToEmit = (program.headerStructName + "." + name +
                       index + "." + field.name)
        width = field.widthInBits()
        if width <= 32:
            store = self.generatePacketStore(fieldToEmit, 0, alignment,
                                             width, program)
            serializer.emitIndent()
            serializer.appendLine(store)
        else:
            # Destination is bigger than 4 bytes and
            # represented as a byte array: emit one store per byte.
            # '//' keeps integer semantics under both Python 2 and 3.
            b = (width + 7) // 8
            for i in range(0, b):
                serializer.emitIndent()
                store = self.generatePacketStore(fieldToEmit + "["+str(i)+"]",
                                                 i,
                                                 alignment,
                                                 8, program)
                serializer.appendLine(store)
        serializer.emitIndent()
        serializer.appendFormat("{0} += {1};",
                                program.offsetVariableName, width)
        serializer.newline()

    def generatePacketStore(self, value, offset, alignment, width, program):
        """Return the C call writing `width` bits of `value` into the
        packet at (current bit offset)/8 + `offset` bytes, starting at
        bit `alignment` within that byte."""
        assert width > 0
        assert alignment < 8
        assert isinstance(width, int)
        assert isinstance(alignment, int)
        return "bpf_dins_pkt({0}, {1} / 8 + {2}, {3}, {4}, {5});".format(
            program.packetName,
            program.offsetVariableName,
            offset,
            alignment,
            width,
            value
        )
......@@ -11,6 +11,7 @@ import ebpfAction
import ebpfInstance
import ebpfConditional
import ebpfCounter
import ebpfDeparser
import programSerializer
import target
from compilationException import *
......@@ -62,6 +63,7 @@ class EbpfProgram(object):
self.metadata = [] # metadata instances
self.stacks = [] # header stack instances EbpfHeaderStack
self.parsers = [] # all parsers
self.deparser = None
self.entryPoints = [] # control-flow entry points from parser
self.counters = []
self.entryPointLabels = {} # maps p4_node from entryPoints
......@@ -123,6 +125,7 @@ class EbpfProgram(object):
self.conditionals.append(conditional)
self.egressEntry = self.hlir.p4_egress_ptr
self.deparser = ebpfDeparser.EbpfDeparser(self.hlir)
def isInternalAction(self, action):
# This is a heuristic really to guess which actions are built-in
......@@ -174,6 +177,8 @@ class EbpfProgram(object):
self.generateParser(serializer)
self.generatePipeline(serializer)
self.generateDeparser(serializer)
serializer.emitIndent()
serializer.appendLine("end:")
serializer.emitIndent()
......@@ -335,6 +340,9 @@ class EbpfProgram(object):
self.metadataStructTypeName,
self.metadataStructName)
def generateDeparser(self, serializer):
    # Emit the deparser section: writes valid headers back into the packet.
    self.deparser.serialize(serializer, self)
def generateInitializeMetadata(self, serializer):
assert isinstance(serializer, programSerializer.ProgramSerializer)
......
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
# -*- coding: utf-8 -*-
class Node(object):
    """Graph vertex wrapping an arbitrary payload with an outgoing edge set."""

    def __init__(self, n):
        # The payload object this vertex represents.
        self.n = n
        # Successor Node objects (directed edges out of this vertex).
        self.edges = set()

    def add_edge_to(self, other):
        """Record a directed edge from this node to `other`."""
        assert isinstance(other, Node)
        self.edges.add(other)

    def __str__(self):
        return str(self.n)
class Graph(object):
    """Directed graph keyed by payload object; used to order headers."""

    def __init__(self):
        # Maps payload object -> its Node wrapper.
        self.nodes = {}
        # Payload of the first header extracted by the parser, if any.
        self.root = None

    def add_node(self, node):
        # 'node' is the payload, not a Node; it must not already be present.
        assert(node not in self.nodes)
        self.nodes[node] = Node(node)

    def __contains__(self, node):
        return node in self.nodes

    def get_node(self, node):
        return self.nodes[node]

    def produce_topo_sorting(self):
        """Depth-first topological sort (three-color marking).

        Returns the payloads in topological order, or None if the graph
        contains a cycle.
        """
        def visit(node, topo_sorting, sequence=None):
            # 'sequence' accumulates the DFS path for cycle diagnostics.
            if sequence is not None:
                sequence += [str(node)]
            if node._behavioral_topo_sorting_mark == 1:
                # Back-edge to a node on the current DFS path: cycle found.
                if sequence is not None:
                    print "cycle", sequence
                return False
            if node._behavioral_topo_sorting_mark != 2:
                node._behavioral_topo_sorting_mark = 1
                for next_node in node.edges:
                    res = visit(next_node, topo_sorting, sequence)
                    if not res:
                        return False
                node._behavioral_topo_sorting_mark = 2
                # Prepend so predecessors end up before their successors.
                topo_sorting.insert(0, node.n)
            return True
        has_cycle = False
        topo_sorting = []
        for node in self.nodes.values():
            # 0 is unmarked, 1 is temp, 2 is permanent
            node._behavioral_topo_sorting_mark = 0
        for node in self.nodes.values():
            if node._behavioral_topo_sorting_mark == 0:
                if not visit(node, topo_sorting, sequence=[]):
                    has_cycle = True
                    break
        # removing mark
        for node in self.nodes.values():
            del node._behavioral_topo_sorting_mark
        if has_cycle:
            return None
        return topo_sorting
../../../../../../bpf-docs/p4toEbpf-bcc.pdf
\ No newline at end of file
/*
Copyright 2013-present Barefoot Networks, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Standard Ethernet header layout; field widths are in bits.
header_type ethernet_t {
    fields {
        dstAddr : 48;
        srcAddr : 48;
        etherType : 16;
    }
}
// IPv4 header layout (no options); field widths are in bits.
header_type ipv4_t {
    fields {
        version : 4;
        ihl : 4;
        diffserv : 8;
        totalLen : 16;
        identification : 16;
        flags : 3;
        fragOffset : 13;
        ttl : 8;
        protocol : 8;
        hdrChecksum : 16;
        srcAddr : 32;
        dstAddr: 32;
    }
}
// Parsing always begins with an Ethernet header.
parser start {
    return parse_ethernet;
}

#define ETHERTYPE_IPV4 0x0800

header ethernet_t ethernet;

// Extract Ethernet; continue to IPv4 only for etherType 0x0800,
// otherwise go straight to the ingress pipeline.
parser parse_ethernet {
    extract(ethernet);
    return select(latest.etherType) {
        ETHERTYPE_IPV4 : parse_ipv4;
        default: ingress;
    }
}
header ipv4_t ipv4;

// IPv4 checksum verification/update is disabled below because the EBPF
// target does not support calculated fields.
/* Not yet supported on EBPF target
field_list ipv4_checksum_list {
    ipv4.version;
    ipv4.ihl;
    ipv4.diffserv;
    ipv4.totalLen;
    ipv4.identification;
    ipv4.flags;
    ipv4.fragOffset;
    ipv4.ttl;
    ipv4.protocol;
    ipv4.srcAddr;
    ipv4.dstAddr;
}
field_list_calculation ipv4_checksum {
    input {
        ipv4_checksum_list;
    }
    algorithm : csum16;
    output_width : 16;
}
calculated_field ipv4.hdrChecksum {
    verify ipv4_checksum;
    update ipv4_checksum;
}
*/

// Extract IPv4 and enter the ingress pipeline.
parser parse_ipv4 {
    extract(ipv4);
    return ingress;
}
// Table sizes and metadata field widths for the router pipeline.
#define PORT_VLAN_TABLE_SIZE 32768
#define BD_TABLE_SIZE 65536
#define IPV4_LPM_TABLE_SIZE 16384
#define IPV4_HOST_TABLE_SIZE 131072
#define NEXTHOP_TABLE_SIZE 32768
#define REWRITE_MAC_TABLE_SIZE 32768

#define VRF_BIT_WIDTH 12
#define BD_BIT_WIDTH 16
#define IFINDEX_BIT_WIDTH 10

/* METADATA */
// Per-packet routing state carried between pipeline stages.
header_type ingress_metadata_t {
    fields {
        vrf : VRF_BIT_WIDTH; /* VRF */
        bd : BD_BIT_WIDTH; /* ingress BD */
        nexthop_index : 16; /* final next hop index */
    }
}
metadata ingress_metadata_t ingress_metadata;

// No-op action: lets a table miss fall through to the caller's
// on_miss handling.
action on_miss() {
}

action set_bd(bd) {
    modify_field(ingress_metadata.bd, bd);
}

// Maps the ingress port to its bridge domain (BD).
table port_mapping {
    reads {
        standard_metadata.ingress_port : exact;
    }
    actions {
        set_bd;
    }
    size : PORT_VLAN_TABLE_SIZE;
}

action set_vrf(vrf) {
    modify_field(ingress_metadata.vrf, vrf);
}

// Maps a bridge domain to its VRF.
table bd {
    reads {
        ingress_metadata.bd : exact;
    }
    actions {
        set_vrf;
    }
    size : BD_TABLE_SIZE;
}
// FIB hit: record the next-hop index and decrement the TTL.
action fib_hit_nexthop(nexthop_index) {
    modify_field(ingress_metadata.nexthop_index, nexthop_index);
    subtract_from_field(ipv4.ttl, 1);
}

// Exact-match host route lookup keyed on (VRF, destination address).
table ipv4_fib {
    reads {
        ingress_metadata.vrf : exact;
        ipv4.dstAddr : exact;
    }
    actions {
        on_miss;
        fib_hit_nexthop;
    }
    size : IPV4_HOST_TABLE_SIZE;
}

// Fallback route table; exact match because the EBPF target has no LPM.
table ipv4_fib_lpm {
    reads {
        ingress_metadata.vrf : exact;
        ipv4.dstAddr : exact; // lpm not supported
    }
    actions {
        on_miss;
        fib_hit_nexthop;
    }
    size : IPV4_LPM_TABLE_SIZE;
}

action set_egress_details(egress_spec) {
    modify_field(standard_metadata.egress_spec, egress_spec);
}

// Resolves a next-hop index to an egress specification.
table nexthop {
    reads {
        ingress_metadata.nexthop_index : exact;
    }
    actions {
        on_miss;
        set_egress_details;
    }
    size : NEXTHOP_TABLE_SIZE;
}
// Ingress: for valid IPv4 packets, resolve port -> BD -> VRF, look up
// the FIB (host table first, fallback table on miss), then pick egress.
control ingress {
    if (valid(ipv4)) {
        /* derive ingress_metadata.bd */
        apply(port_mapping);
        /* derive ingress_metadata.vrf */
        apply(bd);
        /* fib lookup, set ingress_metadata.nexthop_index */
        apply(ipv4_fib) {
            on_miss {
                apply(ipv4_fib_lpm);
            }
        }
        /* derive standard_metadata.egress_spec from ingress_metadata.nexthop_index */
        apply(nexthop);
    }
}
// Rewrite source/destination MAC addresses for the chosen next hop.
action rewrite_src_dst_mac(smac, dmac) {
    modify_field(ethernet.srcAddr, smac);
    modify_field(ethernet.dstAddr, dmac);
}

table rewrite_mac {
    reads {
        ingress_metadata.nexthop_index : exact;
    }
    actions {
        on_miss;
        rewrite_src_dst_mac;
    }
    size : REWRITE_MAC_TABLE_SIZE;
}

control egress {
    /* set smac and dmac from ingress_metadata.nexthop_index */
    apply(rewrite_mac);
}
\ No newline at end of file
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment