Commit 39b6b299 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch

Jesse Gross says:

====================
[GIT net-next] Open vSwitch

Open vSwitch changes for net-next/3.14. Highlights are:
 * Performance improvements in the mechanism to get packets to userspace
   using memory mapped netlink and skb zero copy where appropriate.
 * Per-cpu flow stats in situations where flows are likely to be shared
   across CPUs. Standard flow stats are used in other situations to save
   memory and allocation time.
 * A handful of code cleanups and rationalization.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 56a4342d 443cd88c
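
The first two highlights meet in the upcall path: the message bound for userspace is carved out of the receiving socket's mmap ring when one is configured, and the packet payload is attached by reference rather than copied. The real implementation lives in net/openvswitch/datapath.c (whose diff is collapsed below); the following is a simplified, hypothetical sketch using only the APIs added in this merge, with the function name and attr_overhead parameter invented for illustration.

#include <linux/skbuff.h>
#include <net/genetlink.h>

/* Hedged sketch, not the actual datapath.c code: combine the two new
 * primitives for a userspace-bound packet notification. */
static int upcall_sketch(struct sock *genl_sock, u32 portid,
			 struct sk_buff *packet, size_t attr_overhead)
{
	struct genl_info info = {
		.dst_sk = genl_sock,	/* lets the allocator find the mmap ring */
		.snd_portid = portid,
	};
	struct sk_buff *user_skb;
	unsigned int hlen;

	/* Part of the payload that must live in the linear area. */
	hlen = skb_zerocopy_headlen(packet);

	/* Tries the receiver's mmap ring first; falls back to a normal
	 * skb if the message does not fit in a ring frame. */
	user_skb = genlmsg_new_unicast(attr_overhead + hlen, &info, GFP_ATOMIC);
	if (!user_skb)
		return -ENOMEM;

	/* ... genlmsg_put() and fixed attributes would go here ... */

	/* Copy hlen bytes linearly, take page references for the rest. */
	skb_zerocopy(user_skb, packet, packet->len, hlen);
	return 0;
}
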
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2445,6 +2445,9 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int len,
 		    unsigned int flags);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
+		  int len, int hlen);
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -73,6 +73,7 @@ struct genl_family {
  * @attrs: netlink attributes
  * @_net: network namespace
  * @user_ptr: user pointers
+ * @dst_sk: destination socket
  */
 struct genl_info {
 	u32 snd_seq;
@@ -85,6 +86,7 @@ struct genl_info {
 	struct net *		_net;
 #endif
 	void *			user_ptr[2];
+	struct sock *		dst_sk;
 };
 
 static inline struct net *genl_info_net(struct genl_info *info)
@@ -177,6 +179,8 @@ void genl_notify(struct genl_family *family,
 		 struct sk_buff *skb, struct net *net, u32 portid,
 		 u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+				    gfp_t flags);
 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 		  struct genl_family *family, int flags, u8 cmd);
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -40,7 +40,15 @@ struct ovs_header {
 
 #define OVS_DATAPATH_FAMILY  "ovs_datapath"
 #define OVS_DATAPATH_MCGROUP "ovs_datapath"
-#define OVS_DATAPATH_VERSION 0x1
+
+/* V2:
+ *   - API users are expected to provide OVS_DP_ATTR_USER_FEATURES
+ *     when creating the datapath.
+ */
+#define OVS_DATAPATH_VERSION 2
+
+/* First OVS datapath version to support features */
+#define OVS_DP_VER_FEATURES 2
 
 enum ovs_datapath_cmd {
 	OVS_DP_CMD_UNSPEC,
@@ -75,6 +83,7 @@ enum ovs_datapath_attr {
 	OVS_DP_ATTR_UPCALL_PID,		/* Netlink PID to receive upcalls */
 	OVS_DP_ATTR_STATS,		/* struct ovs_dp_stats */
 	OVS_DP_ATTR_MEGAFLOW_STATS,	/* struct ovs_dp_megaflow_stats */
+	OVS_DP_ATTR_USER_FEATURES,	/* OVS_DP_F_* */
 	__OVS_DP_ATTR_MAX
 };
 
@@ -106,6 +115,9 @@ struct ovs_vport_stats {
 	__u64   tx_dropped;		/* no space available in linux */
 };
 
+/* Allow last Netlink attribute to be unaligned */
+#define OVS_DP_F_UNALIGNED	(1 << 0)
+
 /* Fixed logical ports. */
 #define OVSP_LOCAL      ((__u32)0)
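
Per the V2 comment above, a datapath-creation request is now expected to advertise its feature set. A hedged userspace sketch using libnl-3 (not part of this commit; only the attribute and flag names come from the header above) of how such a request might carry the new attribute:

#include <netlink/attr.h>
#include <linux/openvswitch.h>

/* Advertise that userspace can parse an unaligned final attribute,
 * letting the kernel skip padding the packet payload. */
static int put_dp_features(struct nl_msg *msg)
{
	return nla_put_u32(msg, OVS_DP_ATTR_USER_FEATURES,
			   OVS_DP_F_UNALIGNED);
}
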
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2121,6 +2121,91 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
+/**
+ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
+ * @from: source buffer
+ *
+ * Calculates the amount of linear headroom needed in the 'to' skb passed
+ * into skb_zerocopy().
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+	unsigned int hlen = 0;
+
+	if (!from->head_frag ||
+	    skb_headlen(from) < L1_CACHE_BYTES ||
+	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+		hlen = skb_headlen(from);
+
+	if (skb_has_frag_list(from))
+		hlen = from->len;
+
+	return hlen;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @from: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to `len` bytes from `from` to `to` by creating references
+ * to the frags in the source buffer.
+ *
+ * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
+ * headroom in the `to` buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+	int i, j = 0;
+	int plen = 0; /* length of skb->head fragment */
+	struct page *page;
+	unsigned int offset;
+
+	BUG_ON(!from->head_frag && !hlen);
+
+	/* don't bother with small payloads */
+	if (len <= skb_tailroom(to)) {
+		skb_copy_bits(from, 0, skb_put(to, len), len);
+		return;
+	}
+
+	if (hlen) {
+		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+		len -= hlen;
+	} else {
+		plen = min_t(int, skb_headlen(from), len);
+		if (plen) {
+			page = virt_to_head_page(from->head);
+			offset = from->data - (unsigned char *)page_address(page);
+			__skb_fill_page_desc(to, 0, page, offset, plen);
+			get_page(page);
+			j = 1;
+			len -= plen;
+		}
+	}
+
+	to->truesize += len + plen;
+	to->len += len + plen;
+	to->data_len += len + plen;
+
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+		if (!len)
+			break;
+		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+		len -= skb_shinfo(to)->frags[j].size;
+		skb_frag_ref(to, j);
+		j++;
+	}
+	skb_shinfo(to)->nr_frags = j;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy);
+
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
 	__wsum csum;
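
Worth noting from the helpers above: the destination skb must provide at least hlen bytes of tailroom, since that much is copied linearly via skb_copy_bits(); everything else is attached as page fragments whose references are taken with skb_frag_ref(). The calling convention, distilled (a sketch, not new kernel API; the nfnetlink_queue conversion that follows is the real in-tree caller):

#include <linux/skbuff.h>

/* Sketch: attach up to @len bytes of @src to @dst with minimal copying. */
static void zerocopy_attach(struct sk_buff *dst, struct sk_buff *src,
			    unsigned int len)
{
	/* How much must be copied: head not page-backed, head smaller
	 * than a cache line, or too many frags to reference. */
	unsigned int hlen = skb_zerocopy_headlen(src);

	/* Never ask to copy more than will be attached overall. */
	hlen = min_t(unsigned int, hlen, len);

	/* Requires skb_tailroom(dst) >= hlen. */
	skb_zerocopy(dst, src, len, hlen);
}
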
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -236,51 +236,6 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 	spin_unlock_bh(&queue->lock);
 }
 
-static void
-nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
-{
-	int i, j = 0;
-	int plen = 0; /* length of skb->head fragment */
-	struct page *page;
-	unsigned int offset;
-
-	/* dont bother with small payloads */
-	if (len <= skb_tailroom(to)) {
-		skb_copy_bits(from, 0, skb_put(to, len), len);
-		return;
-	}
-
-	if (hlen) {
-		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
-		len -= hlen;
-	} else {
-		plen = min_t(int, skb_headlen(from), len);
-		if (plen) {
-			page = virt_to_head_page(from->head);
-			offset = from->data - (unsigned char *)page_address(page);
-			__skb_fill_page_desc(to, 0, page, offset, plen);
-			get_page(page);
-			j = 1;
-			len -= plen;
-		}
-	}
-
-	to->truesize += len + plen;
-	to->len += len + plen;
-	to->data_len += len + plen;
-
-	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
-		if (!len)
-			break;
-		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
-		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
-		len -= skb_shinfo(to)->frags[j].size;
-		skb_frag_ref(to, j);
-		j++;
-	}
-	skb_shinfo(to)->nr_frags = j;
-}
-
 static int
 nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 		      bool csum_verify)
@@ -330,7 +285,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 {
 	size_t size;
 	size_t data_len = 0, cap_len = 0;
-	int hlen = 0;
+	unsigned int hlen = 0;
 	struct sk_buff *skb;
 	struct nlattr *nla;
 	struct nfqnl_msg_packet_hdr *pmsg;
@@ -382,14 +337,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		if (data_len > entskb->len)
 			data_len = entskb->len;
 
-		if (!entskb->head_frag ||
-		    skb_headlen(entskb) < L1_CACHE_BYTES ||
-		    skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
-			hlen = skb_headlen(entskb);
-
-		if (skb_has_frag_list(entskb))
-			hlen = entskb->len;
-		hlen = min_t(int, data_len, hlen);
+		hlen = skb_zerocopy_headlen(entskb);
+		hlen = min_t(unsigned int, hlen, data_len);
 		size += sizeof(struct nlattr) + hlen;
 		cap_len = entskb->len;
 		break;
@@ -539,7 +488,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		nla->nla_type = NFQA_PAYLOAD;
 		nla->nla_len = nla_attr_size(data_len);
 
-		nfqnl_zcopy(skb, entskb, data_len, hlen);
+		skb_zerocopy(skb, entskb, data_len, hlen);
 	}
 
 	nlh->nlmsg_len = skb->len;
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1773,6 +1773,9 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 		if (ring->pg_vec == NULL)
 			goto out_put;
 
+		if (ring->frame_size - NL_MMAP_HDRLEN < size)
+			goto out_put;
+
 		skb = alloc_skb_head(gfp_mask);
 		if (skb == NULL)
 			goto err1;
@@ -1782,6 +1785,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 		if (ring->pg_vec == NULL)
 			goto out_free;
 
+		/* check again under lock */
 		maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 		if (maxlen < size)
 			goto out_free;
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -460,6 +460,26 @@ int genl_unregister_family(struct genl_family *family)
 }
 EXPORT_SYMBOL(genl_unregister_family);
 
+/**
+ * genlmsg_new_unicast - Allocate generic netlink message for unicast
+ * @payload: size of the message payload
+ * @info: information on destination
+ * @flags: the type of memory to allocate
+ *
+ * Allocates a new sk_buff large enough to cover the specified payload
+ * plus required Netlink headers. Will check receiving socket for
+ * memory mapped i/o capability and use it if enabled. Will fall back
+ * to non-mapped skb if message size exceeds the frame size of the ring.
+ */
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+				    gfp_t flags)
+{
+	size_t len = nlmsg_total_size(genlmsg_total_size(payload));
+
+	return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
+}
+EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
+
 /**
  * genlmsg_put - Add generic netlink header to netlink message
  * @skb: socket buffer holding the message
@@ -600,6 +620,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
 	info.genlhdr = nlmsg_data(nlh);
 	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
 	info.attrs = attrbuf;
+	info.dst_sk = skb->sk;
 	genl_info_net_set(&info, net);
 	memset(&info.user_ptr, 0, sizeof(info.user_ptr));
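
Because genl_family_rcv_msg() now records the request's originating socket in info->dst_sk (see the hunk above), any .doit handler can allocate its reply with the new helper and transparently benefit when the receiver has set up an mmap ring. A hedged sketch; example_doit() and the reply-filling step are placeholders, not code from this commit:

#include <net/genetlink.h>

/* Sketch of a generic netlink .doit handler using the new allocator. */
static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;

	/* Tries the requester's mmap ring first, then a regular skb. */
	reply = genlmsg_new_unicast(NLMSG_DEFAULT_SIZE, info, GFP_KERNEL);
	if (!reply)
		return -ENOMEM;

	/* ... fill reply with genlmsg_put()/nla_put() as usual ... */

	return genlmsg_reply(reply, info);
}
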
This diff is collapsed (net/openvswitch/datapath.c).
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,6 +88,8 @@ struct datapath {
 	/* Network namespace ref. */
 	struct net *net;
 #endif
+
+	u32 user_features;
 };
 
 /**
@@ -145,6 +147,8 @@ int lockdep_ovsl_is_held(void);
 #define ASSERT_OVSL()		WARN_ON(unlikely(!lockdep_ovsl_is_held()))
 #define ovsl_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_ovsl_is_held())
+#define rcu_dereference_ovsl(p)					\
+	rcu_dereference_check(p, lockdep_ovsl_is_held())
 
 static inline struct net *ovs_dp_get_net(struct datapath *dp)
 {
@@ -178,14 +182,12 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
 
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_family dp_vport_genl_family;
-extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
 void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
 void ovs_dp_detach_port(struct vport *);
 int ovs_dp_upcall(struct datapath *, struct sk_buff *,
 		  const struct dp_upcall_info *);
 
-const char *ovs_dp_name(const struct datapath *dp);
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 					 u8 cmd);
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -35,6 +35,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/sctp.h>
+#include <linux/smp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/icmp.h>
@@ -60,10 +61,16 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
+	struct flow_stats *stats;
 	__be16 tcp_flags = 0;
 
+	if (!flow->stats.is_percpu)
+		stats = flow->stats.stat;
+	else
+		stats = this_cpu_ptr(flow->stats.cpu_stats);
+
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
@@ -71,12 +78,87 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}
 
-	spin_lock(&flow->lock);
-	flow->used = jiffies;
-	flow->packet_count++;
-	flow->byte_count += skb->len;
-	flow->tcp_flags |= tcp_flags;
-	spin_unlock(&flow->lock);
+	spin_lock(&stats->lock);
+	stats->used = jiffies;
+	stats->packet_count++;
+	stats->byte_count += skb->len;
+	stats->tcp_flags |= tcp_flags;
+	spin_unlock(&stats->lock);
+}
+
+static void stats_read(struct flow_stats *stats,
+		       struct ovs_flow_stats *ovs_stats,
+		       unsigned long *used, __be16 *tcp_flags)
+{
+	spin_lock(&stats->lock);
+	if (time_after(stats->used, *used))
+		*used = stats->used;
+
+	*tcp_flags |= stats->tcp_flags;
+
+	ovs_stats->n_packets += stats->packet_count;
+	ovs_stats->n_bytes += stats->byte_count;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+			unsigned long *used, __be16 *tcp_flags)
+{
+	int cpu, cur_cpu;
+
+	*used = 0;
+	*tcp_flags = 0;
+	memset(ovs_stats, 0, sizeof(*ovs_stats));
+
+	if (!flow->stats.is_percpu) {
+		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
+	} else {
+		cur_cpu = get_cpu();
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *stats;
+
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			stats_read(stats, ovs_stats, used, tcp_flags);
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
+}
+
+static void stats_reset(struct flow_stats *stats)
+{
+	spin_lock(&stats->lock);
+	stats->used = 0;
+	stats->packet_count = 0;
+	stats->byte_count = 0;
+	stats->tcp_flags = 0;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_clear(struct sw_flow *flow)
+{
+	int cpu, cur_cpu;
+
+	if (!flow->stats.is_percpu) {
+		stats_reset(flow->stats.stat);
+	} else {
+		cur_cpu = get_cpu();
+
+		for_each_possible_cpu(cpu) {
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
 }
 
 static int check_header(struct sk_buff *skb, int len)
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -19,6 +19,7 @@
 #ifndef FLOW_H
 #define FLOW_H 1
 
+#include <linux/cache.h>
 #include <linux/kernel.h>
 #include <linux/netlink.h>
 #include <linux/openvswitch.h>
@@ -122,8 +123,8 @@ struct sw_flow_key {
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
 struct sw_flow_key_range {
-	size_t start;
-	size_t end;
+	unsigned short int start;
+	unsigned short int end;
 };
 
 struct sw_flow_mask {
@@ -146,6 +147,22 @@ struct sw_flow_actions {
 	struct nlattr actions[];
 };
 
+struct flow_stats {
+	u64 packet_count;		/* Number of packets matched. */
+	u64 byte_count;			/* Number of bytes matched. */
+	unsigned long used;		/* Last used time (in jiffies). */
+	spinlock_t lock;		/* Lock for atomic stats update. */
+	__be16 tcp_flags;		/* Union of seen TCP flags. */
+};
+
+struct sw_flow_stats {
+	bool is_percpu;
+	union {
+		struct flow_stats *stat;
+		struct flow_stats __percpu *cpu_stats;
+	};
+};
+
 struct sw_flow {
 	struct rcu_head rcu;
 	struct hlist_node hash_node[2];
@@ -155,12 +172,7 @@ struct sw_flow {
 	struct sw_flow_key unmasked_key;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-
-	spinlock_t lock;	/* Lock for values below. */
-	unsigned long used;	/* Last used time (in jiffies). */
-	u64 packet_count;	/* Number of packets matched. */
-	u64 byte_count;		/* Number of bytes matched. */
-	__be16 tcp_flags;	/* Union of seen TCP flags. */
+	struct sw_flow_stats stats;
 };
 
 struct arp_eth_header {
@@ -177,7 +189,10 @@ struct arp_eth_header {
 	unsigned char       ar_tip[4];		/* target IP address */
 } __packed;
 
-void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+			unsigned long *used, __be16 *tcp_flags);
+void ovs_flow_stats_clear(struct sw_flow *flow);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -266,6 +266,20 @@ static bool is_all_zero(const u8 *fp, size_t size)
 	return true;
 }
 
+static bool is_all_set(const u8 *fp, size_t size)
+{
+	int i;
+
+	if (!fp)
+		return false;
+
+	for (i = 0; i < size; i++)
+		if (fp[i] != 0xff)
+			return false;
+
+	return true;
+}
+
 static int __parse_flow_nlattrs(const struct nlattr *attr,
 				const struct nlattr *a[],
 				u64 *attrsp, bool nz)
@@ -487,8 +501,9 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
-				const struct nlattr **a, bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
+				u64 attrs, const struct nlattr **a,
+				bool is_mask)
 {
 	int err;
 	u64 orig_attrs = attrs;
@@ -545,6 +560,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
 	}
 
+	if (is_mask && exact_5tuple) {
+		if (match->mask->key.eth.type != htons(0xffff))
+			*exact_5tuple = false;
+	}
+
 	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
 		const struct ovs_key_ipv4 *ipv4_key;
 
@@ -567,6 +587,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
 				ipv4_key->ipv4_dst, is_mask);
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv4_key->ipv4_proto != 0xff ||
+			    ipv4_key->ipv4_src != htonl(0xffffffff) ||
+			    ipv4_key->ipv4_dst != htonl(0xffffffff))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -598,6 +625,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 				is_mask);
 
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv6_key->ipv6_proto != 0xff ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -640,6 +674,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					tcp_key->tcp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (tcp_key->tcp_src != htons(0xffff) ||
+		     tcp_key->tcp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -671,6 +710,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					udp_key->udp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (udp_key->udp_src != htons(0xffff) ||
+		     udp_key->udp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
@@ -756,6 +800,7 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *key,
 		      const struct nlattr *mask)
 {
@@ -803,10 +848,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 		}
 	}
 
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+	err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
 	if (err)
 		return err;
 
+	if (exact_5tuple)
+		*exact_5tuple = true;
+
 	if (mask) {
 		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
 		if (err)
@@ -844,7 +892,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 		}
 	}
 
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+		err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
 		if (err)
 			return err;
 	} else {
@@ -1128,19 +1176,11 @@ struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
 	return sfa;
 }
 
-/* RCU callback used by ovs_nla_free_flow_actions. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
-	struct sw_flow_actions *sf_acts = container_of(rcu,
-			struct sw_flow_actions, rcu);
-	kfree(sf_acts);
-}
-
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
 {
-	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+	kfree_rcu(sf_acts, rcu);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,6 +45,7 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
 			      const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *,
 		      const struct nlattr *);
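
The new exact_5tuple out-parameter threads through both files above: ovs_nla_get_match() starts it at true and the mask-parsing pass clears it as soon as any checked field (eth.type, the L3 protocol, addresses, or TCP/UDP ports) is less than fully masked. Judging by the commit message, the collapsed datapath.c diff presumably uses this flag to pick shared stats for exact 5-tuple flows (which RSS tends to keep on one CPU) and per-cpu stats for wildcarded megaflows; that linkage is an inference, not shown here. A sketch of mask values that would keep the flag true, with struct layouts taken from include/uapi/linux/openvswitch.h:

#include <linux/openvswitch.h>

/* Sketch: an IPv4/TCP mask shape that ovs_nla_get_match() would report
 * as an exact 5-tuple (every checked field fully masked). */
static const struct ovs_key_ipv4 exact_ipv4_mask = {
	.ipv4_src   = htonl(0xffffffff),
	.ipv4_dst   = htonl(0xffffffff),
	.ipv4_proto = 0xff,
	/* ipv4_tos/ipv4_ttl/ipv4_frag may be wildcarded; they are not
	 * part of the 5-tuple test above. */
};

static const struct ovs_key_tcp exact_tcp_mask = {
	.tcp_src = htons(0xffff),
	.tcp_dst = htons(0xffff),
};

/* eth.type must also be fully masked, htons(0xffff), per the first check. */
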
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -44,8 +44,6 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 
-#include "datapath.h"
-
 #define TBL_MIN_BUCKETS		1024
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
@@ -72,19 +70,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 {
 	struct sw_flow *flow;
+	int cpu;
 
 	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
 	if (!flow)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&flow->lock);
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
 
+	flow->stats.is_percpu = percpu_stats;
+
+	if (!percpu_stats) {
+		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
+		if (!flow->stats.stat)
+			goto err;
+
+		spin_lock_init(&flow->stats.stat->lock);
+	} else {
+		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
+		if (!flow->stats.cpu_stats)
+			goto err;
+
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *cpu_stats;
+
+			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			spin_lock_init(&cpu_stats->lock);
+		}
+	}
 	return flow;
+err:
+	kfree(flow);
+	return ERR_PTR(-ENOMEM);
 }
 
 int ovs_flow_tbl_count(struct flow_table *table)
@@ -118,6 +139,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 static void flow_free(struct sw_flow *flow)
 {
 	kfree((struct sf_flow_acts __force *)flow->sf_acts);
+	if (flow->stats.is_percpu)
+		free_percpu(flow->stats.cpu_stats);
+	else
+		kfree(flow->stats.stat);
 	kmem_cache_free(flow_cache, flow);
 }
 
@@ -128,13 +153,6 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
 	flow_free(flow);
 }
 
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
-{
-	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
-
-	kfree(mask);
-}
-
 static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
 {
 	if (!mask)
@@ -146,7 +164,7 @@ static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
 	if (!mask->ref_count) {
 		list_del_rcu(&mask->list);
 		if (deferred)
-			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+			kfree_rcu(mask, rcu);
 		else
 			kfree(mask);
 	}
@@ -429,11 +447,11 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	return NULL;
 }
 
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
-				    const struct sw_flow_key *key,
-				    u32 *n_mask_hit)
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+					  const struct sw_flow_key *key,
+					  u32 *n_mask_hit)
 {
-	struct table_instance *ti = rcu_dereference(tbl->ti);
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
 
@@ -447,6 +465,14 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	return NULL;
 }
 
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+				    const struct sw_flow_key *key)
+{
+	u32 __always_unused n_mask_hit;
+
+	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
 	struct sw_flow_mask *mask;
@@ -514,11 +540,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 	return NULL;
 }
 
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
+/* Add 'mask' into the mask list, if it is not already there. */
 static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 			    struct sw_flow_mask *new)
 {
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -55,7 +55,7 @@ struct flow_table {
 int ovs_flow_init(void);
 void ovs_flow_exit(void);
 
-struct sw_flow *ovs_flow_alloc(void);
+struct sw_flow *ovs_flow_alloc(bool percpu_stats);
 void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
@@ -69,9 +69,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
 int ovs_flow_tbl_num_masks(const struct flow_table *table);
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 				       u32 *bucket, u32 *idx);
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
-				    const struct sw_flow_key *,
-				    u32 *n_mask_hit);
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
+					  const struct sw_flow_key *,
+					  u32 *n_mask_hit);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
+				    const struct sw_flow_key *);
 
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 			       struct sw_flow_match *match);
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -33,6 +33,9 @@
 #include "vport.h"
 #include "vport-internal_dev.h"
 
+static void ovs_vport_record_error(struct vport *,
+				   enum vport_err_type err_type);
+
 /* List of statically compiled vport implementations.  Don't forget to also
  * add yours to the list at the bottom of vport.h. */
 static const struct vport_ops *vport_ops_list[] = {
@@ -396,7 +399,8 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
  * If using the vport generic stats layer indicate that an error of the given
  * type has occurred.
  */
-void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+static void ovs_vport_record_error(struct vport *vport,
+				   enum vport_err_type err_type)
 {
 	spin_lock(&vport->stats_lock);
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -192,7 +192,6 @@ static inline struct vport *vport_from_priv(const void *priv)
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
 		       struct ovs_key_ipv4_tunnel *);
-void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
 
 /* List of statically compiled vport implementations.  Don't forget to also
  * add yours to the list at the top of vport.c. */