Commit 6b798d70 authored by David S. Miller

Merge branch 'net_next_ovs' of git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch

Pravin B Shelar says:

====================
Open vSwitch

The first two patches are related to OVS MPLS support. The rest of the patches
are mostly refactoring and minor improvements to openvswitch.

v1-v2:
 - Fix conflicts due to "gue: Remote checksum offload"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 271d70f4 a85311bf
......@@ -47,7 +47,6 @@ enum {
NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
......@@ -119,7 +118,6 @@ enum {
#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
......@@ -183,7 +181,6 @@ enum {
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM | \
NETIF_F_GSO_MPLS)
NETIF_F_GSO_UDP_TUNNEL_CSUM)
#endif /* _LINUX_NETDEV_FEATURES_H */
......@@ -3583,7 +3583,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
......
......@@ -372,9 +372,7 @@ enum {
SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
SKB_GSO_MPLS = 1 << 12,
SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};
#if BITS_PER_LONG > 32
......
/*
* Copyright (c) 2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _NET_MPLS_H
#define _NET_MPLS_H 1
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#define MPLS_HLEN 4
static inline bool eth_p_mpls(__be16 eth_type)
{
return eth_type == htons(ETH_P_MPLS_UC) ||
eth_type == htons(ETH_P_MPLS_MC);
}
/*
* For non-MPLS skbs this will correspond to the network header.
* For MPLS skbs it will be before the network_header as the MPLS
* label stack lies between the end of the mac header and the network
* header. That is, for MPLS skbs the end of the mac header
* is the top of the MPLS label stack.
*/
static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
{
return skb_mac_header(skb) + skb->mac_len;
}
#endif
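As an illustration only (not part of this commit), the two helpers above can be combined to read the topmost label stack entry of an MPLS frame. This is a minimal sketch assuming skb->mac_len is already valid and that <linux/skbuff.h>, <linux/string.h> and this header are included; the function name is hypothetical.

/* Hypothetical example, not in this commit: copy the top (first) MPLS label
 * stack entry of an MPLS skb into *lse. Returns false for non-MPLS frames.
 */
static inline bool example_read_top_mpls_lse(struct sk_buff *skb, __be32 *lse)
{
	if (!eth_p_mpls(skb->protocol))
		return false;

	/* skb_mpls_header() points just past the L2 header, i.e. at the
	 * topmost label stack entry.
	 */
	memcpy(lse, skb_mpls_header(skb), MPLS_HLEN);
	return true;
}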
......@@ -293,6 +293,9 @@ enum ovs_key_attr {
OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash
is not computed by the datapath. */
OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
* The implementation may restrict
* the accepted length of the array. */
#ifdef __KERNEL__
OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
......@@ -340,6 +343,10 @@ struct ovs_key_ethernet {
__u8 eth_dst[ETH_ALEN];
};
struct ovs_key_mpls {
__be32 mpls_lse;
};
struct ovs_key_ipv4 {
__be32 ipv4_src;
__be32 ipv4_dst;
......@@ -393,9 +400,9 @@ struct ovs_key_arp {
};
struct ovs_key_nd {
__u32 nd_target[4];
__u8 nd_sll[ETH_ALEN];
__u8 nd_tll[ETH_ALEN];
__be32 nd_target[4];
__u8 nd_sll[ETH_ALEN];
__u8 nd_tll[ETH_ALEN];
};
/**
......@@ -483,6 +490,19 @@ enum ovs_userspace_attr {
#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
/**
* struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument.
* @mpls_lse: MPLS label stack entry to push.
* @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
*
* The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
* %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
*/
struct ovs_action_push_mpls {
__be32 mpls_lse;
__be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
};
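For illustration only (not something this commit adds), the action argument above could be assembled as follows. The helper name is hypothetical; the bit layout is the standard RFC 3032 label stack entry: a 20-bit label, 3-bit traffic class, bottom-of-stack bit and 8-bit TTL packed into one big-endian 32-bit word. ETH_P_MPLS_UC comes from <linux/if_ether.h>.

/* Hypothetical example: build a push-MPLS action argument for a unicast
 * label stack entry.
 */
static inline struct ovs_action_push_mpls example_push_mpls(u32 label, u8 tc,
							    bool bos, u8 ttl)
{
	struct ovs_action_push_mpls push = {
		.mpls_lse = htonl((label << 12) | ((u32)tc << 9) |
				  ((u32)bos << 8) | ttl),
		.mpls_ethertype = htons(ETH_P_MPLS_UC),
	};

	return push;
}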
/**
* struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
* @vlan_tpid: Tag protocol identifier (TPID) to push.
......@@ -534,6 +554,15 @@ struct ovs_action_hash {
* @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
* @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
* top of the packet's MPLS label stack. Set the ethertype of the
* encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to
* indicate the new packet contents.
* @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the
* packet's MPLS label stack. Set the encapsulating frame's ethertype to
* indicate the new packet contents. This could potentially still be
* %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
* is no MPLS label stack, as determined by ethertype, no action is taken.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
......@@ -550,6 +579,9 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */
OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */
OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */
__OVS_ACTION_ATTR_MAX
};
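The PUSH_MPLS and POP_MPLS attributes documented above carry a struct ovs_action_push_mpls and a bare __be16 ethertype, respectively. As a hedged sketch (hypothetical helper, not part of this commit), emitting such a pair into a netlink message with the standard nla_put()/nla_put_be16() helpers could look like this; the kernel side reads them back with nla_data() and nla_get_be16(), as seen in do_execute_actions() later in this diff.

/* Hypothetical example: append a PUSH_MPLS/POP_MPLS action pair to a
 * netlink message under construction.
 */
static int example_put_mpls_actions(struct sk_buff *msg,
				    const struct ovs_action_push_mpls *push,
				    __be16 pop_ethertype)
{
	if (nla_put(msg, OVS_ACTION_ATTR_PUSH_MPLS, sizeof(*push), push))
		return -EMSGSIZE;
	if (nla_put_be16(msg, OVS_ACTION_ATTR_POP_MPLS, pop_ethertype))
		return -EMSGSIZE;
	return 0;
}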
......
......@@ -118,6 +118,7 @@
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
......@@ -2530,7 +2531,7 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
if (eth_p_mpls(type))
features &= skb->dev->mpls_features;
return features;
......
......@@ -84,7 +84,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation",
[NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
......
......@@ -1223,7 +1223,6 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_MPLS |
0)))
goto out;
......
......@@ -94,7 +94,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_MPLS |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_TUNNEL_REMCSUM |
......
......@@ -207,8 +207,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_IPIP |
SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
SKB_GSO_MPLS) ||
SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
!(type & (SKB_GSO_UDP))))
goto out;
......
......@@ -79,7 +79,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_MPLS |
SKB_GSO_TCPV6 |
0)))
goto out;
......
......@@ -46,8 +46,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_MPLS) ||
SKB_GSO_SIT) ||
!(type & (SKB_GSO_UDP))))
goto out;
......
......@@ -34,8 +34,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_MPLS)))
SKB_GSO_IPIP)))
goto out;
/* Setup inner SKB. */
......
......@@ -30,6 +30,7 @@ config OPENVSWITCH
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
select NET_MPLS_GSO
depends on INET
depends on OPENVSWITCH
depends on NET_IPGRE_DEMUX
......
......@@ -28,10 +28,12 @@
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>
#include "datapath.h"
......@@ -118,6 +120,92 @@ static int make_writable(struct sk_buff *skb, int write_len)
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
static int push_mpls(struct sk_buff *skb,
const struct ovs_action_push_mpls *mpls)
{
__be32 *new_mpls_lse;
struct ethhdr *hdr;
/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
if (skb->encapsulation)
return -ENOTSUPP;
if (skb_cow_head(skb, MPLS_HLEN) < 0)
return -ENOMEM;
skb_push(skb, MPLS_HLEN);
memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
skb->mac_len);
skb_reset_mac_header(skb);
new_mpls_lse = (__be32 *)skb_mpls_header(skb);
*new_mpls_lse = mpls->mpls_lse;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
MPLS_HLEN, 0));
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
return 0;
}
static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
{
struct ethhdr *hdr;
int err;
err = make_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum,
csum_partial(skb_mpls_header(skb),
MPLS_HLEN, 0));
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
skb->mac_len);
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
/* skb_mpls_header() is used to locate the ethertype
* field correctly in the presence of VLAN tags.
*/
hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
hdr->h_proto = ethertype;
if (eth_p_mpls(skb->protocol))
skb->protocol = ethertype;
return 0;
}
static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
{
__be32 *stack;
int err;
err = make_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
stack = (__be32 *)skb_mpls_header(skb);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~(*stack), *mpls_lse };
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
}
*stack = *mpls_lse;
return 0;
}
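The CHECKSUM_COMPLETE handling in set_mpls() above is the usual incremental checksum update: feeding the one's complement of the old word, followed by the new word, into csum_partial() over the complemented running sum cancels the old word's contribution and adds the new one. A minimal, hypothetical restatement of that pattern for any 32-bit field covered by skb->csum follows (push_mpls() and pop_mpls() above get the same effect with csum_add() and csum_sub()):

/* Hypothetical example: replace a 32-bit word that is covered by skb->csum
 * while keeping CHECKSUM_COMPLETE consistent.
 */
static inline void example_csum_replace_be32(struct sk_buff *skb,
					     __be32 *field, __be32 new_val)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*field), new_val };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*field = new_val;
}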
/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
......@@ -140,10 +228,12 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
vlan_set_encap_proto(skb, vhdr);
skb->mac_header += VLAN_HLEN;
if (skb_network_offset(skb) < ETH_HLEN)
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_len(skb);
/* Update mac_len for subsequent MPLS actions */
skb_reset_mac_len(skb);
return 0;
}
......@@ -186,6 +276,8 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
return -ENOMEM;
/* Update mac_len for subsequent MPLS actions */
skb->mac_len += VLAN_HLEN;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(skb->csum, csum_partial(skb->data
......@@ -459,21 +551,14 @@ static int set_sctp(struct sk_buff *skb,
return 0;
}
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
struct vport *vport;
struct vport *vport = ovs_vport_rcu(dp, out_port);
if (unlikely(!skb))
return -ENOMEM;
vport = ovs_vport_rcu(dp, out_port);
if (unlikely(!vport)) {
if (likely(vport))
ovs_vport_send(vport, skb);
else
kfree_skb(skb);
return -ENODEV;
}
ovs_vport_send(vport, skb);
return 0;
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
......@@ -612,6 +697,10 @@ static int execute_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_SCTP:
err = set_sctp(skb, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_MPLS:
err = set_mpls(skb, nla_data(nested_attr));
break;
}
return err;
......@@ -672,8 +761,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
a = nla_next(a, &rem)) {
int err = 0;
if (prev_port != -1) {
do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
if (unlikely(prev_port != -1)) {
struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
if (out_skb)
do_output(dp, out_skb, prev_port);
prev_port = -1;
}
......@@ -690,6 +783,14 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
execute_hash(skb, key, a);
break;
case OVS_ACTION_ATTR_PUSH_MPLS:
err = push_mpls(skb, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_MPLS:
err = pop_mpls(skb, nla_get_be16(a));
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
err = push_vlan(skb, nla_data(a));
if (unlikely(err)) /* skb already freed. */
......@@ -764,14 +865,11 @@ static void process_deferred_actions(struct datapath *dp)
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key)
struct sw_flow_actions *acts, struct sw_flow_key *key)
{
int level = this_cpu_read(exec_actions_level);
struct sw_flow_actions *acts;
int err;
acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
this_cpu_inc(exec_actions_level);
OVS_CB(skb)->egress_tun_info = NULL;
err = do_execute_actions(dp, skb, key,
......
......@@ -94,14 +94,12 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
* @flow: The flow associated with this packet. May be %NULL if no flow.
* @egress_tun_key: Tunnel information about this packet on egress path.
* NULL if the packet is not being tunneled.
* @input_vport: The original vport packet came in on. This value is cached
* when a packet is received by OVS.
*/
struct ovs_skb_cb {
struct sw_flow *flow;
struct ovs_tunnel_info *egress_tun_info;
struct vport *input_vport;
};
......@@ -194,7 +192,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *);
struct sw_flow_actions *acts, struct sw_flow_key *);
void ovs_dp_notify_wq(struct work_struct *work);
......
......@@ -32,6 +32,7 @@
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
......@@ -42,6 +43,7 @@
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include "datapath.h"
......@@ -480,6 +482,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
return -ENOMEM;
skb_reset_network_header(skb);
skb_reset_mac_len(skb);
__skb_push(skb, skb->data - skb_mac_header(skb));
/* Network layer. */
......@@ -584,6 +587,33 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (eth_p_mpls(key->eth.type)) {
size_t stack_len = MPLS_HLEN;
/* In the presence of an MPLS label stack the end of the L2
* header and the beginning of the L3 header differ.
*
* Advance network_header to the beginning of the L3
* header. mac_len corresponds to the end of the L2 header.
*/
while (1) {
__be32 lse;
error = check_header(skb, skb->mac_len + stack_len);
if (unlikely(error))
return 0;
memcpy(&lse, skb_network_header(skb), MPLS_HLEN);
if (stack_len == MPLS_HLEN)
memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
skb_set_network_header(skb, skb->mac_len + stack_len);
if (lse & htonl(MPLS_LS_S_MASK))
break;
stack_len += MPLS_HLEN;
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
......
......@@ -102,12 +102,17 @@ struct sw_flow_key {
__be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
__be16 type; /* Ethernet frame type. */
} eth;
struct {
u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
u8 tos; /* IP ToS. */
u8 ttl; /* IP TTL/hop limit. */
u8 frag; /* One of OVS_FRAG_TYPE_*. */
} ip;
union {
struct {
__be32 top_lse; /* top label stack entry */
} mpls;
struct {
u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
u8 tos; /* IP ToS. */
u8 ttl; /* IP TTL/hop limit. */
u8 frag; /* One of OVS_FRAG_TYPE_*. */
} ip;
};
struct {
__be16 src; /* TCP/UDP/SCTP source port. */
__be16 dst; /* TCP/UDP/SCTP destination port. */
......
......@@ -37,6 +37,8 @@
#include "flow.h"
size_t ovs_key_attr_size(void);
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key, struct sw_flow_mask *mask);
......@@ -49,12 +51,11 @@ int ovs_nla_get_match(struct sw_flow_match *match,
const struct nlattr *);
int ovs_nla_copy_actions(const struct nlattr *attr,
const struct sw_flow_key *key, int depth,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa);
int ovs_nla_put_actions(const struct nlattr *attr,
int len, struct sk_buff *skb);
struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
#endif /* flow_netlink.h */
/*
* Copyright (c) 2007-2013 Nicira, Inc.
* Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
......@@ -250,11 +250,14 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
__table_instance_destroy(ti);
}
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
/* No need for locking; this function is called from an RCU callback or
* the error path.
*/
void ovs_flow_tbl_destroy(struct flow_table *table)
{
struct table_instance *ti = ovsl_dereference(table->ti);
struct table_instance *ti = rcu_dereference_raw(table->ti);
table_instance_destroy(ti, deferred);
table_instance_destroy(ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
......
......@@ -62,7 +62,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
int ovs_flow_tbl_count(struct flow_table *table);
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
void ovs_flow_tbl_destroy(struct flow_table *table);
int ovs_flow_tbl_flush(struct flow_table *flow_table);
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
......
......@@ -224,6 +224,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
struct net_device *netdev = netdev_vport_priv(vport)->dev;
int len;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
return 0;
}
len = skb->len;
skb_dst_drop(skb);
......