Commit 6f6e8414 authored by Jesse Gross, committed by Greg Kroah-Hartman

openvswitch: Zero flows on allocation.

[ Upstream commit ae5f2fb1 ]

When support for megaflows was introduced, OVS needed to start
installing flows with a mask applied to them. Since masking is an
expensive operation, OVS also had an optimization that would only
take the parts of the flow keys that were covered by a non-zero
mask. The values stored in the remaining pieces should not matter
because they are masked out.

While this works fine for the purposes of matching (which must always
look at the mask), serialization to netlink can be problematic. Since
the flow and the mask are serialized separately, the uninitialized
portions of the flow can be encoded with whatever values happen to be
present.
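
To make this concrete, here is a minimal standalone sketch (toy types and names, not the kernel's; the real helper is ovs_flow_mask_key() in the diff further down). The range-limited copy writes only the bytes covered by the mask, and dumping the whole destination key afterwards, standing in for the separate netlink serialization of the flow, emits whatever stale bytes were already sitting in the buffer:

#include <stdio.h>
#include <string.h>

struct toy_key {
	unsigned char bytes[16];	/* stand-in for struct sw_flow_key */
};

struct toy_mask {
	struct toy_key key;
	int start;			/* stand-in for mask->range.start */
	int end;			/* stand-in for mask->range.end */
};

/* Copy only the masked window from src into dst, AND'ed with the mask. */
static void toy_mask_key(struct toy_key *dst, const struct toy_key *src,
			 const struct toy_mask *mask)
{
	int i;

	for (i = mask->start; i < mask->end; i++)
		dst->bytes[i] = src->bytes[i] & mask->key.bytes[i];
	/* Bytes of dst outside [start, end) are never written here. */
}

int main(void)
{
	struct toy_key src, dst;	/* dst deliberately left uninitialized */
	struct toy_mask mask = { .start = 4, .end = 8 };
	int i;

	memset(&src, 0xab, sizeof(src));
	memset(&mask.key, 0xff, sizeof(mask.key));

	toy_mask_key(&dst, &src, &mask);

	/*
	 * Printing the whole key stands in for serializing it: bytes
	 * outside [4, 8) emit whatever stale values were on the stack.
	 */
	for (i = 0; i < 16; i++)
		printf("%02x ", dst.bytes[i]);
	printf("\n");
	return 0;
}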

In terms of functionality, this has little effect since these fields
will be masked out by definition. However, it leaks kernel memory to
userspace, which is a potential security vulnerability. It is also
possible that other code paths could look at the masked key and get
uninitialized data, although this does not currently appear to be an
issue in practice.

This removes the mask optimization for flows that are being installed.
This was always intended to be the case as the mask optimizations were
really targeting per-packet flow operations.
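
The shape of the fix is visible in the diff below: ovs_flow_mask_key() gains a 'full' flag, the flow-install path in ovs_flow_cmd_new_or_set() passes true so the entire masked key is initialized before it can be serialized, and the per-packet lookup in masked_flow_lookup() passes false to keep the cheap range-limited copy:

	/* Flow install: initialize every byte of the masked key. */
	ovs_flow_mask_key(&masked_key, &key, true, &mask);

	/* Per-packet lookup: the range-limited copy is still safe here. */
	ovs_flow_mask_key(&masked_key, unmasked, false, mask);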

Fixes: 03f0d916 ("openvswitch: Mega flow implementation")
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 62f575aa
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -803,7 +803,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		if (IS_ERR(acts))
 			goto error;
 
-		ovs_flow_mask_key(&masked_key, &key, &mask);
+		ovs_flow_mask_key(&masked_key, &key, true, &mask);
 		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
 					     &masked_key, 0, &acts);
 		if (error) {
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -55,18 +55,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 }
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask)
+		       bool full, const struct sw_flow_mask *mask)
 {
-	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-	const long *s = (long *)((u8 *)src + mask->range.start);
-	long *d = (long *)((u8 *)dst + mask->range.start);
+	int start = full ? 0 : mask->range.start;
+	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+	const long *m = (const long *)((const u8 *)&mask->key + start);
+	const long *s = (const long *)((const u8 *)src + start);
+	long *d = (long *)((u8 *)dst + start);
 	int i;
 
-	/* The memory outside of the 'mask->range' are not set since
-	 * further operations on 'dst' only uses contents within
-	 * 'mask->range'.
+	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+	 * if 'full' is false the memory outside of the 'mask->range' is left
+	 * uninitialized. This can be used as an optimization when further
+	 * operations on 'dst' only use contents within 'mask->range'.
 	 */
-	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+	for (i = 0; i < len; i += sizeof(long))
 		*d++ = *s++ & *m++;
 }
 
@@ -436,7 +439,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	u32 hash;
 	struct sw_flow_key masked_key;
 
-	ovs_flow_mask_key(&masked_key, unmasked, mask);
+	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 	hash = flow_hash(&masked_key, key_start, key_end);
 	head = find_bucket(ti, hash);
 	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -79,5 +79,5 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 			       struct sw_flow_match *match);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-		       const struct sw_flow_mask *mask);
+		       bool full, const struct sw_flow_mask *mask);
 #endif /* flow_table.h */