Commit 45d9cc7c authored by David S. Miller's avatar David S. Miller

Merge branch 'geneve'

Andy Zhou says:

====================
Add Geneve tunnel protocol support

This patch series adds kernel support for Geneve (Generic Network
Virtualization Encapsulation) based on Geneve IETF draft:
http://www.ietf.org/id/draft-gross-geneve-01.txt

Patch 1 implements Geneve tunneling protocol driver

Patch 2-6 adds openvswitch support for creating and using
Geneve tunnels by OVS user space.

v1->v2:   Style fixes: use tab instead space for Kconfig
	  Patch 2-6 are reviewed by Pravin Shetty, add him to acked-by
	  Patch 6 was reviewed by Thomas Graf when committing
	    to openvswitch.org, add him to acked-by.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c259c132 f5796684
#ifndef __NET_GENEVE_H
#define __NET_GENEVE_H 1
#include <net/udp_tunnel.h>
struct geneve_sock;

/* Per-socket receive callback, invoked for each decapsulated packet. */
typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);

/* One open Geneve UDP socket; shareable between users via @refcnt. */
struct geneve_sock {
	struct hlist_node hlist;	/* entry in per-netns port hash (gs_head()) */
	geneve_rcv_t *rcv;		/* receive callback */
	void *rcv_data;			/* opaque data for the rcv callback */
	struct work_struct del_work;	/* deferred teardown (geneve_del_work) */
	struct socket *sock;
	struct rcu_head rcu;
	atomic_t refcnt;		/* number of users sharing this socket */
	struct udp_offload udp_offloads; /* GRO offload registration */
};
/* Geneve Header:
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |Ver|  Opt Len  |O|C|    Rsvd.  |          Protocol Type        |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |        Virtual Network Identifier (VNI)       |    Reserved   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Variable Length Options                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Option Header:
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |          Option Class         |      Type     |R|R|R| Length  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Variable Option Data                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */

/* A single TLV option carried after the base Geneve header. */
struct geneve_opt {
	__be16 opt_class;	/* namespace for the type field */
	u8 type;		/* high bit = critical (GENEVE_CRIT_OPT_TYPE) */
#ifdef __LITTLE_ENDIAN_BITFIELD
	u8 length:5;		/* opt_data length in 4-byte multiples */
	u8 r3:1;
	u8 r2:1;
	u8 r1:1;
#else
	u8 r1:1;
	u8 r2:1;
	u8 r3:1;
	u8 length:5;
#endif
	u8 opt_data[];		/* variable-length option payload */
};
/* High bit of geneve_opt.type marks the option as critical. */
#define GENEVE_CRIT_OPT_TYPE (1 << 7)

/* On-the-wire Geneve base header; bitfields laid out per host endianness. */
struct genevehdr {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u8 opt_len:6;		/* total options length in 4-byte multiples */
	u8 ver:2;		/* protocol version, must equal GENEVE_VER */
	u8 rsvd1:6;
	u8 critical:1;		/* critical options are present */
	u8 oam:1;		/* OAM frame */
#else
	u8 ver:2;
	u8 opt_len:6;
	u8 oam:1;
	u8 critical:1;
	u8 rsvd1:6;
#endif
	__be16 proto_type;	/* ethertype of the inner payload */
	u8 vni[3];		/* 24-bit virtual network identifier */
	u8 rsvd2;
	struct geneve_opt options[];	/* opt_len * 4 bytes of options */
};
#define GENEVE_VER 0

/* Outer UDP header plus the fixed Geneve header (options excluded). */
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* Obtain a (possibly shared) Geneve socket bound to @port. */
struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
				    geneve_rcv_t *rcv, void *data,
				    bool no_share, bool ipv6);

/* Drop a reference obtained from geneve_sock_add(). */
void geneve_sock_release(struct geneve_sock *vs);

/* Encapsulate and transmit @skb; caller supplies route, addresses, and
 * the pre-built option block (@opt, @opt_len bytes).
 */
int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
		    bool xnet);
#endif
......@@ -86,15 +86,18 @@ struct ip_tunnel {
struct gro_cells gro_cells;
};
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
#define TUNNEL_KEY __cpu_to_be16(0x04)
#define TUNNEL_SEQ __cpu_to_be16(0x08)
#define TUNNEL_STRICT __cpu_to_be16(0x10)
#define TUNNEL_REC __cpu_to_be16(0x20)
#define TUNNEL_VERSION __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
#define TUNNEL_KEY __cpu_to_be16(0x04)
#define TUNNEL_SEQ __cpu_to_be16(0x08)
#define TUNNEL_STRICT __cpu_to_be16(0x10)
#define TUNNEL_REC __cpu_to_be16(0x20)
#define TUNNEL_VERSION __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
#define TUNNEL_OAM __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
struct tnl_ptk_info {
__be16 flags;
......
......@@ -192,6 +192,7 @@ enum ovs_vport_type {
OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
OVS_VPORT_TYPE_GRE, /* GRE tunnel. */
OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */
OVS_VPORT_TYPE_GENEVE, /* Geneve tunnel. */
__OVS_VPORT_TYPE_MAX
};
......@@ -294,7 +295,7 @@ enum ovs_key_attr {
OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
#ifdef __KERNEL__
OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
#endif
__OVS_KEY_ATTR_MAX
};
......@@ -309,6 +310,8 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */
OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */
OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */
OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */
OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
__OVS_TUNNEL_KEY_ATTR_MAX
};
......
......@@ -453,6 +453,20 @@ config TCP_CONG_BIC
increase provides TCP friendliness.
See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
config GENEVE
tristate "Generic Network Virtualization Encapsulation (Geneve)"
depends on INET
select NET_IP_TUNNEL
select NET_UDP_TUNNEL
---help---
This allows one to create Geneve virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. Geneve is often used
to tunnel virtual network infrastructure in virtualized environments.
For more information see:
http://tools.ietf.org/html/draft-gross-geneve-01
	  To compile this driver as a module, choose M here: the module
	  will be called geneve.
config TCP_CONG_CUBIC
tristate "CUBIC TCP"
default y
......
......@@ -56,6 +56,7 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o xfrm4_protocol.o
/*
* Geneve: Generic Network Virtualization Encapsulation
*
* Copyright (c) 2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/geneve.h>
#include <net/protocol.h>
#include <net/udp_tunnel.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
/* Size of the per-netns socket hash table. */
#define PORT_HASH_BITS 8
#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)

/* per-network namespace private data for this module */
struct geneve_net {
	struct hlist_head sock_list[PORT_HASH_SIZE];	/* sockets hashed by UDP port */
	spinlock_t sock_lock;	/* Protects sock_list */
};

/* ID for net_generic() lookup of struct geneve_net. */
static int geneve_net_id;

/* Workqueue running deferred socket teardown (geneve_del_work()). */
static struct workqueue_struct *geneve_wq;
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
return (struct genevehdr *)(udp_hdr(skb) + 1);
}
/* Map a UDP port to its bucket in the per-netns socket hash table. */
static struct hlist_head *gs_head(struct net *net, __be16 port)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	unsigned int bucket = hash_32(ntohs(port), PORT_HASH_BITS);

	return &gn->sock_list[bucket];
}
/* Find geneve socket based on network namespace and UDP port */
static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port)
{
	struct geneve_sock *gs;

	/* RCU list walk; caller must be in an RCU read-side section or
	 * hold gn->sock_lock.
	 */
	hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) {
		if (inet_sk(gs->sock->sk)->inet_sport == port)
			return gs;
	}

	return NULL;
}
/* Fill in the Geneve header at @geneveh.
 *
 * @tun_flags:   TUNNEL_OAM / TUNNEL_CRIT_OPT map to the O and C bits.
 * @vni:         24-bit virtual network identifier.
 * @options_len: byte length of @options; stored in 4-byte units, so it
 *               is expected to be a multiple of 4.
 */
static void geneve_build_header(struct genevehdr *geneveh,
				__be16 tun_flags, u8 vni[3],
				u8 options_len, u8 *options)
{
	geneveh->ver = GENEVE_VER;
	geneveh->opt_len = options_len / 4;
	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	geneveh->rsvd1 = 0;
	memcpy(geneveh->vni, vni, 3);
	/* Inner payload is always transparent Ethernet bridging. */
	geneveh->proto_type = htons(ETH_P_TEB);
	geneveh->rsvd2 = 0;
	memcpy(geneveh->options, options, options_len);
}
/* Transmit a fully formatted Geneve frame.
 *
 * When calling this function, skb->data should point
 * to the geneve header which is fully formed.
 *
 * This function will add other UDP tunnel headers.
 *
 * Returns 0/positive on success, negative errno on failure.
 */
int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
		    bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	/* udp_tunnel_handle_offloads() returns an ERR_PTR on failure;
	 * check it before dereferencing skb below.
	 */
	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Room for link-layer, IP, UDP and Geneve headers (plus options),
	 * and a VLAN tag if one must be pushed into the payload.
	 */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		/* NOTE(review): __vlan_put_tag() is expected to consume the
		 * skb on failure, so no free here — confirm.
		 */
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	/* Prepend the Geneve header (and options) in the reserved headroom. */
	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
				   tos, ttl, df, src_port, dst_port, xnet);
}
EXPORT_SYMBOL_GPL(geneve_xmit_skb);
/* Register this socket's UDP port for receive offload (IPv4 only). */
static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	int err;

	if (sk->sk_family != AF_INET)
		return;

	err = udp_add_offload(&gs->udp_offloads);
	if (err)
		pr_warn("geneve: udp_add_offload failed with status %d\n",
			err);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);

	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	/* Only transparent-Ethernet-bridging payloads are accepted. */
	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	/* opt_len is stored in 4-byte multiples. */
	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	/* Hand the decapsulated frame to the registered receiver. */
	gs->rcv(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}
/* Deferred socket teardown, executed on geneve_wq. */
static void geneve_del_work(struct work_struct *work)
{
	struct geneve_sock *gs;

	gs = container_of(work, struct geneve_sock, del_work);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}
/* Open a UDP listen socket on @port for tunnel traffic.
 * Returns the socket or an ERR_PTR on failure.
 */
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	udp_conf.family = ipv6 ? AF_INET6 : AF_INET;
	if (!ipv6)
		udp_conf.local_ip.s_addr = INADDR_ANY;
	udp_conf.local_udp_port = port;

	err = udp_sock_create(net, &udp_conf, &sock);

	return err < 0 ? ERR_PTR(err) : sock;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						geneve_rcv_t *rcv, void *data,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&gs->del_work, geneve_del_work);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	/* Initial reference; dropped by geneve_sock_release(). */
	atomic_set(&gs->refcnt, 1);
	gs->rcv = rcv;
	gs->rcv_data = data;

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive = NULL;
	gs->udp_offloads.callbacks.gro_complete = NULL;

	/* Publish the socket in the per-netns hash and register the RX
	 * offload port, both under sock_lock.
	 */
	spin_lock(&gn->sock_lock);
	hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
	geneve_notify_add_rx_port(gs);
	spin_unlock(&gn->sock_lock);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	/* NOTE(review): the socket is hashed before setup_udp_tunnel_sock()
	 * runs; presumably safe because the encap receive path dereferences
	 * sk_user_data via RCU — confirm.
	 */
	return gs;
}
/* Obtain a Geneve socket bound to @port, creating it if possible.
 *
 * If creation fails (e.g. the port is already bound) and @no_share is
 * false, fall back to sharing an existing socket with the same @rcv
 * callback, taking an extra reference on it.
 */
struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
				    geneve_rcv_t *rcv, void *data,
				    bool no_share, bool ipv6)
{
	struct geneve_sock *gs;

	gs = geneve_socket_create(net, port, rcv, data, ipv6);
	if (!IS_ERR(gs))
		return gs;

	if (no_share)	/* Return error if sharing is not allowed. */
		return ERR_PTR(-EINVAL);

	gs = geneve_find_sock(net, port);
	if (gs) {
		if (gs->rcv == rcv)
			atomic_inc(&gs->refcnt);
		else
			/* Port already claimed by a different receiver. */
			gs = ERR_PTR(-EBUSY);
	} else {
		gs = ERR_PTR(-EINVAL);
	}

	return gs;
}
EXPORT_SYMBOL_GPL(geneve_sock_add);
/* Drop one reference; the final put defers teardown to geneve_wq. */
void geneve_sock_release(struct geneve_sock *gs)
{
	if (atomic_dec_and_test(&gs->refcnt))
		queue_work(geneve_wq, &gs->del_work);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);
/* Per-netns init: empty socket hash table plus its protecting lock. */
static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	unsigned int i;

	spin_lock_init(&gn->sock_lock);

	for (i = 0; i < PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&gn->sock_list[i]);

	return 0;
}
/* Per-network-namespace registration; .size makes the core allocate one
 * struct geneve_net per netns, reachable via net_generic(geneve_net_id).
 */
static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = NULL,	/* no per-netns teardown hook */
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};
/* Module init: allocate the teardown workqueue and register per-netns
 * state. Registered as a late_initcall (see below).
 */
static int __init geneve_init_module(void)
{
	int rc;

	geneve_wq = alloc_workqueue("geneve", 0, 0);
	if (!geneve_wq)
		return -ENOMEM;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc) {
		/* Don't leak the workqueue on registration failure. */
		destroy_workqueue(geneve_wq);
		return rc;
	}

	pr_info("Geneve driver\n");

	return 0;
}
late_initcall(geneve_init_module);
/* Module exit: undo geneve_init_module().
 * The original only destroyed the workqueue, leaving the pernet subsys
 * registered after unload; unregister it as well.
 */
static void __exit geneve_cleanup_module(void)
{
	destroy_workqueue(geneve_wq);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");
......@@ -54,3 +54,14 @@ config OPENVSWITCH_VXLAN
Say N to exclude this support and reduce the binary size.
If unsure, say Y.
config OPENVSWITCH_GENEVE
bool "Open vSwitch Geneve tunneling support"
depends on INET
depends on OPENVSWITCH
depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m)
default y
---help---
If you say Y here, then the Open vSwitch will be able create geneve vport.
Say N to exclude this support and reduce the binary size.
......@@ -15,6 +15,10 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
ifneq ($(CONFIG_OPENVSWITCH_GENEVE),)
openvswitch-y += vport-geneve.o
endif
ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
openvswitch-y += vport-vxlan.o
endif
......
......@@ -590,8 +590,8 @@ static int execute_set_action(struct sk_buff *skb,
skb->mark = nla_get_u32(nested_attr);
break;
case OVS_KEY_ATTR_IPV4_TUNNEL:
OVS_CB(skb)->egress_tun_key = nla_data(nested_attr);
case OVS_KEY_ATTR_TUNNEL_INFO:
OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
break;
case OVS_KEY_ATTR_ETHERNET:
......@@ -778,6 +778,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
this_cpu_inc(exec_actions_level);
OVS_CB(skb)->egress_tun_info = NULL;
err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len);
......
......@@ -369,6 +369,8 @@ static size_t key_attr_size(void)
+ nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+ nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
+ nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
+ nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
......@@ -555,10 +557,12 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, 0, &acts);
rcu_assign_pointer(flow->sf_acts, acts);
if (err)
goto err_flow_free;
rcu_assign_pointer(flow->sf_acts, acts);
OVS_CB(packet)->egress_tun_info = NULL;
OVS_CB(packet)->flow = flow;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
......@@ -932,11 +936,34 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
return error;
}
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
const struct sw_flow_key *key,
const struct sw_flow_mask *mask)
{
struct sw_flow_actions *acts;
struct sw_flow_key masked_key;
int error;
acts = ovs_nla_alloc_flow_actions(nla_len(a));
if (IS_ERR(acts))
return acts;
ovs_flow_mask_key(&masked_key, key, mask);
error = ovs_nla_copy_actions(a, &masked_key, 0, &acts);
if (error) {
OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
kfree(acts);
return ERR_PTR(error);
}
return acts;
}
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key, masked_key;
struct sw_flow_key key;
struct sw_flow *flow;
struct sw_flow_mask mask;
struct sk_buff *reply = NULL;
......@@ -958,17 +985,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
error = PTR_ERR(acts);
if (IS_ERR(acts))
acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask);
if (IS_ERR(acts)) {
error = PTR_ERR(acts);
goto error;
ovs_flow_mask_key(&masked_key, &key, &mask);
error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
&masked_key, 0, &acts);
if (error) {
OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
goto err_kfree_acts;
}
}
......
......@@ -102,8 +102,8 @@ struct datapath {
*/
struct ovs_skb_cb {
struct sw_flow *flow;
struct ovs_tunnel_info *egress_tun_info;
struct vport *input_vport;
struct ovs_key_ipv4_tunnel *egress_tun_key;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
......
......@@ -448,6 +448,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
int error;
struct ethhdr *eth;
/* Flags are always used as part of stats */
key->tp.flags = 0;
skb_reset_mac_header(skb);
/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
......@@ -462,6 +465,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
* update skb->csum here.
*/
key->eth.tci = 0;
if (vlan_tx_tag_present(skb))
key->eth.tci = htons(skb->vlan_tci);
else if (eth->h_proto == htons(ETH_P_8021Q))
......@@ -482,6 +486,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
error = check_iphdr(skb);
if (unlikely(error)) {
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
......@@ -503,8 +509,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
return 0;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
else
key->ip.frag = OVS_FRAG_TYPE_NONE;
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
......@@ -513,18 +521,25 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_ICMP) {
if (icmphdr_ok(skb)) {
......@@ -534,33 +549,44 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
* them in 16-bit network byte order. */
key->tp.src = htons(icmp->type);
key->tp.dst = htons(icmp->code);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
}
} else if ((key->eth.type == htons(ETH_P_ARP) ||
key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
} else if (key->eth.type == htons(ETH_P_ARP) ||
key->eth.type == htons(ETH_P_RARP)) {
struct arp_eth_header *arp;
arp = (struct arp_eth_header *)skb_network_header(skb);
if (arp->ar_hrd == htons(ARPHRD_ETHER)
&& arp->ar_pro == htons(ETH_P_IP)
&& arp->ar_hln == ETH_ALEN
&& arp->ar_pln == 4) {
if (arphdr_ok(skb) &&
arp->ar_hrd == htons(ARPHRD_ETHER) &&
arp->ar_pro == htons(ETH_P_IP) &&
arp->ar_hln == ETH_ALEN &&
arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
else
key->ip.proto = 0;
memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
} else {
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
if (nh_len == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
......@@ -582,24 +608,32 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_ICMP) {
if (icmp6hdr_ok(skb)) {
error = parse_icmpv6(skb, key, nh_len);
if (error)
return error;
} else {
memset(&key->tp, 0, sizeof(key->tp));
}
}
}
......@@ -611,17 +645,36 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
return key_extract(skb, key);
}
int ovs_flow_key_extract(struct ovs_key_ipv4_tunnel *tun_key,
int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
/* Extract metadata from packet. */
memset(key, 0, sizeof(*key));
if (tun_key)
memcpy(&key->tun_key, tun_key, sizeof(key->tun_key));
if (tun_info) {
memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
if (tun_info->options) {
BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
8)) - 1
> sizeof(key->tun_opts));
memcpy(GENEVE_OPTS(key, tun_info->options_len),
tun_info->options, tun_info->options_len);
key->tun_opts_len = tun_info->options_len;
} else {
key->tun_opts_len = 0;
}
} else {
key->tun_opts_len = 0;
memset(&key->tun_key, 0, sizeof(key->tun_key));
}
key->phy.priority = skb->priority;
key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
key->phy.skb_mark = skb->mark;
key->ovs_flow_hash = 0;
key->recirc_id = 0;
/* Flags are always used as part of stats */
key->tp.flags = 0;
return key_extract(skb, key);
}
......@@ -632,7 +685,6 @@ int ovs_flow_key_extract_userspace(const struct nlattr *attr,
{
int err;
memset(key, 0, sizeof(*key));
/* Extract metadata from netlink attributes. */
err = ovs_nla_get_flow_metadata(attr, key);
if (err)
......
......@@ -49,23 +49,45 @@ struct ovs_key_ipv4_tunnel {
u8 ipv4_ttl;
} __packed __aligned(4); /* Minimize padding. */
static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
const struct iphdr *iph, __be64 tun_id,
__be16 tun_flags)
struct ovs_tunnel_info {
struct ovs_key_ipv4_tunnel tunnel;
struct geneve_opt *options;
u8 options_len;
};
/* Store options at the end of the array if they are less than the
* maximum size. This allows us to get the benefits of variable length
* matching for small options.
*/
#define GENEVE_OPTS(flow_key, opt_len) \
((struct geneve_opt *)((flow_key)->tun_opts + \
FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
opt_len))
static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
const struct iphdr *iph,
__be64 tun_id, __be16 tun_flags,
struct geneve_opt *opts,
u8 opts_len)
{
tun_key->tun_id = tun_id;
tun_key->ipv4_src = iph->saddr;
tun_key->ipv4_dst = iph->daddr;
tun_key->ipv4_tos = iph->tos;
tun_key->ipv4_ttl = iph->ttl;
tun_key->tun_flags = tun_flags;
tun_info->tunnel.tun_id = tun_id;
tun_info->tunnel.ipv4_src = iph->saddr;
tun_info->tunnel.ipv4_dst = iph->daddr;
tun_info->tunnel.ipv4_tos = iph->tos;
tun_info->tunnel.ipv4_ttl = iph->ttl;
tun_info->tunnel.tun_flags = tun_flags;
/* clear struct padding. */
memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0,
sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE);
memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
tun_info->options = opts;
tun_info->options_len = opts_len;
}
struct sw_flow_key {
u8 tun_opts[255];
u8 tun_opts_len;
struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
struct {
u32 priority; /* Packet QoS priority. */
......@@ -190,8 +212,8 @@ void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
int ovs_flow_key_extract(struct ovs_key_ipv4_tunnel *tun_key,
struct sk_buff *skb, struct sw_flow_key *key);
int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb,
struct sw_flow_key *key);
/* Extract key from packet coming from userspace. */
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
struct sk_buff *skb,
......
This diff is collapsed.
/*
* Copyright (c) 2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/version.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/rculist.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/xfrm.h>
#include "datapath.h"
#include "vport.h"
/**
 * struct geneve_port - Keeps track of open UDP ports
 * @gs: The Geneve socket created for this port number.
 * @name: vport name.
 */
struct geneve_port {
	struct geneve_sock *gs;
	char name[IFNAMSIZ];
};

/* NOTE(review): geneve_ports appears unused in this file — confirm before
 * removing.
 */
static LIST_HEAD(geneve_ports);
/* Return the Geneve-private area of @vport. */
static inline struct geneve_port *geneve_vport(const struct vport *vport)
{
	struct geneve_port *port = vport_priv(vport);

	return port;
}
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
return (struct genevehdr *)(udp_hdr(skb) + 1);
}
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	/* Low 24 bits of the ID, most significant byte first. */
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	/* On little-endian hosts the big-endian ID's trailing bytes live
	 * in the high bits of the host-order value, hence shifts of
	 * 40/48/56.
	 */
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
/* Convert 24 bit VNI to 64 bit tunnel ID. */
static __be64 vni_to_tunnel_id(__u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	/* Inverse of tunnel_id_to_vni(): place the three VNI bytes into
	 * the high bits of the host-order value so the big-endian result
	 * carries them in its low 24 bits.
	 */
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
/* Receive callback registered with the Geneve socket: build OVS tunnel
 * metadata from the outer IP header and the Geneve header/options, then
 * pass the packet up to the vport layer.
 */
static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct vport *vport = gs->rcv_data;
	struct genevehdr *geneveh = geneve_hdr(skb);
	int opts_len;
	struct ovs_tunnel_info tun_info;
	__be64 key;
	__be16 flags;

	opts_len = geneveh->opt_len * 4;	/* opt_len is in 4-byte units */

	/* TUNNEL_CSUM reflects whether the outer UDP checksum was set. */
	flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT |
		(udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
		(geneveh->oam ? TUNNEL_OAM : 0) |
		(geneveh->critical ? TUNNEL_CRIT_OPT : 0);

	key = vni_to_tunnel_id(geneveh->vni);

	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags,
			       geneveh->options, opts_len);

	ovs_vport_receive(vport, skb, &tun_info);
}
static int geneve_get_options(const struct vport *vport,
struct sk_buff *skb)
{
struct geneve_port *geneve_port = geneve_vport(vport);
__be16 sport;
sport = ntohs(inet_sk(geneve_port->gs->sock->sk)->inet_sport);
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, sport))
return -EMSGSIZE;
return 0;
}
/* Destroy a Geneve vport: release the socket ref, free the vport via RCU. */
static void geneve_tnl_destroy(struct vport *vport)
{
	struct geneve_port *port = geneve_vport(vport);

	geneve_sock_release(port->gs);
	ovs_vport_deferred_free(vport);
}
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
struct geneve_port *geneve_port;
struct geneve_sock *gs;
struct vport *vport;
struct nlattr *a;
int err;
u16 dst_port;
if (!options) {
err = -EINVAL;
goto error;
}
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
if (a && nla_len(a) == sizeof(u16)) {
dst_port = nla_get_u16(a);
} else {
/* Require destination port from userspace. */
err = -EINVAL;
goto error;
}
vport = ovs_vport_alloc(sizeof(struct geneve_port),
&ovs_geneve_vport_ops, parms);
if (IS_ERR(vport))
return vport;
geneve_port = geneve_vport(vport);
strncpy(geneve_port->name, parms->name, IFNAMSIZ);
gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
if (IS_ERR(gs)) {
ovs_vport_free(vport);
return (void *)gs;
}
geneve_port->gs = gs;
return vport;
error:
return ERR_PTR(err);
}
/* Transmit path: encapsulate @skb according to the flow's egress tunnel
 * metadata and hand it to geneve_xmit_skb().
 *
 * Returns the result of geneve_xmit_skb() or a negative errno.
 */
static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct ovs_key_ipv4_tunnel *tun_key;
	struct ovs_tunnel_info *tun_info;
	struct net *net = ovs_dp_get_net(vport->dp);
	struct geneve_port *geneve_port = geneve_vport(vport);
	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
	__be16 sport;
	struct rtable *rt;
	struct flowi4 fl;
	u8 vni[3];
	__be16 df;
	int err;

	/* Actions must have set egress tunnel metadata before output. */
	tun_info = OVS_CB(skb)->egress_tun_info;
	if (unlikely(!tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &tun_info->tunnel;

	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	/* Source port derived from the inner flow (udp_flow_src_port()). */
	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
	tunnel_id_to_vni(tun_key->tun_id, vni);
	skb->ignore_df = 1;

	err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
			      tun_key->ipv4_dst, tun_key->ipv4_tos,
			      tun_key->ipv4_ttl, df, sport, dport,
			      tun_key->tun_flags, vni,
			      tun_info->options_len, (u8 *)tun_info->options,
			      false);
	if (err < 0)
		ip_rt_put(rt);	/* xmit failed: drop our route reference */
error:
	return err;
}
static const char *geneve_get_name(const struct vport *vport)
{
struct geneve_port *geneve_port = geneve_vport(vport);
return geneve_port->name;
}
/* Vport operations for Geneve tunnels (OVS_VPORT_TYPE_GENEVE). */
const struct vport_ops ovs_geneve_vport_ops = {
	.type		= OVS_VPORT_TYPE_GENEVE,
	.create		= geneve_tnl_create,
	.destroy	= geneve_tnl_destroy,
	.get_name	= geneve_get_name,
	.get_options	= geneve_get_options,
	.send		= geneve_tnl_send,
};
......@@ -63,8 +63,10 @@ static __be16 filter_tnl_flags(__be16 flags)
static struct sk_buff *__build_header(struct sk_buff *skb,
int tunnel_hlen)
{
const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->egress_tun_key;
struct tnl_ptk_info tpi;
const struct ovs_key_ipv4_tunnel *tun_key;
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
......@@ -92,7 +94,7 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
static int gre_rcv(struct sk_buff *skb,
const struct tnl_ptk_info *tpi)
{
struct ovs_key_ipv4_tunnel tun_key;
struct ovs_tunnel_info tun_info;
struct ovs_net *ovs_net;
struct vport *vport;
__be64 key;
......@@ -103,10 +105,10 @@ static int gre_rcv(struct sk_buff *skb,
return PACKET_REJECT;
key = key_to_tunnel_id(tpi->key, tpi->seq);
ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
filter_tnl_flags(tpi->flags));
ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
filter_tnl_flags(tpi->flags), NULL, 0);
ovs_vport_receive(vport, skb, &tun_key);
ovs_vport_receive(vport, skb, &tun_info);
return PACKET_RCVD;
}
......@@ -137,12 +139,12 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
__be16 df;
int err;
if (unlikely(!OVS_CB(skb)->egress_tun_key)) {
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
goto error;
}
tun_key = OVS_CB(skb)->egress_tun_key;
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
/* Route lookup */
memset(&fl, 0, sizeof(fl));
fl.daddr = tun_key->ipv4_dst;
......
......@@ -58,7 +58,7 @@ static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
struct ovs_key_ipv4_tunnel tun_key;
struct ovs_tunnel_info tun_info;
struct vport *vport = vs->data;
struct iphdr *iph;
__be64 key;
......@@ -66,9 +66,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
/* Save outer tunnel values */
iph = ip_hdr(skb);
key = cpu_to_be64(ntohl(vx_vni) >> 8);
ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0);
ovs_vport_receive(vport, skb, &tun_key);
ovs_vport_receive(vport, skb, &tun_info);
}
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
......@@ -147,12 +147,12 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
__be16 df;
int err;
if (unlikely(!OVS_CB(skb)->egress_tun_key)) {
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
goto error;
}
tun_key = OVS_CB(skb)->egress_tun_key;
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
/* Route lookup */
memset(&fl, 0, sizeof(fl));
fl.daddr = tun_key->ipv4_dst;
......
......@@ -48,6 +48,9 @@ static const struct vport_ops *vport_ops_list[] = {
#ifdef CONFIG_OPENVSWITCH_VXLAN
&ovs_vxlan_vport_ops,
#endif
#ifdef CONFIG_OPENVSWITCH_GENEVE
&ovs_geneve_vport_ops,
#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
......@@ -432,7 +435,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb)
* skb->data should point to the Ethernet header.
*/
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
struct ovs_key_ipv4_tunnel *tun_key)
struct ovs_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
struct sw_flow_key key;
......@@ -445,9 +448,9 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
u64_stats_update_end(&stats->syncp);
OVS_CB(skb)->input_vport = vport;
OVS_CB(skb)->egress_tun_key = NULL;
OVS_CB(skb)->egress_tun_info = NULL;
/* Extract flow from 'skb' into 'key'. */
error = ovs_flow_key_extract(tun_key, skb, &key);
error = ovs_flow_key_extract(tun_info, skb, &key);
if (unlikely(error)) {
kfree_skb(skb);
return;
......
......@@ -207,7 +207,7 @@ static inline struct vport *vport_from_priv(void *priv)
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
struct ovs_key_ipv4_tunnel *);
struct ovs_tunnel_info *);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
......@@ -215,6 +215,7 @@ extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_gre_vport_ops;
extern const struct vport_ops ovs_vxlan_vport_ops;
extern const struct vport_ops ovs_geneve_vport_ops;
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment