Commit 57f7d7b9 authored by Tonghao Zhang's avatar Tonghao Zhang Committed by David S. Miller

net: openvswitch: optimize flow-mask looking up

A full lookup on the flow table traverses the entire mask array.
If the mask array is too large, the number of invalid flow-masks
increases and performance drops.

One bad case, for example: M means the flow-mask is valid and
NULL means the flow-mask has been deleted.

+-------------------------------------------+
| M | NULL | ...                  | NULL | M|
+-------------------------------------------+

In that case, without this patch, openvswitch traverses the whole
mask array, because there is one flow-mask at the tail. This
patch changes the way flow-masks are inserted and deleted, so the
mask array is kept as below: there is no NULL hole. In the
fast path, we can "break" out of the "for" loop (instead of
"continue") in flow_lookup when we hit a NULL flow-mask.

         "break"
            v
+-------------------------------------------+
| M | M |  NULL |...           | NULL | NULL|
+-------------------------------------------+

This patch does not optimize the slow or control path, which still
uses ma->max to traverse. Slow path:
* tbl_mask_array_realloc
* ovs_flow_tbl_lookup_exact
* flow_mask_find
Signed-off-by: default avatarTonghao Zhang <xiangxia.m.yue@gmail.com>
Tested-by: default avatarGreg Rose <gvrose8192@gmail.com>
Acked-by: default avatarPravin B Shelar <pshelar@ovn.org>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent a7f35e78
...@@ -518,8 +518,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, ...@@ -518,8 +518,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
u32 *n_mask_hit, u32 *n_mask_hit,
u32 *index) u32 *index)
{ {
struct sw_flow_mask *mask;
struct sw_flow *flow; struct sw_flow *flow;
struct sw_flow_mask *mask;
int i; int i;
if (*index < ma->max) { if (*index < ma->max) {
...@@ -538,7 +538,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, ...@@ -538,7 +538,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
mask = rcu_dereference_ovsl(ma->masks[i]); mask = rcu_dereference_ovsl(ma->masks[i]);
if (!mask) if (!mask)
continue; break;
flow = masked_flow_lookup(ti, key, mask, n_mask_hit); flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) { /* Found */ if (flow) { /* Found */
...@@ -695,8 +695,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl, ...@@ -695,8 +695,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
int ovs_flow_tbl_num_masks(const struct flow_table *table) int ovs_flow_tbl_num_masks(const struct flow_table *table)
{ {
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
return READ_ONCE(ma->count);
return ma->count;
} }
static struct table_instance *table_instance_expand(struct table_instance *ti, static struct table_instance *table_instance_expand(struct table_instance *ti,
...@@ -705,21 +704,33 @@ static struct table_instance *table_instance_expand(struct table_instance *ti, ...@@ -705,21 +704,33 @@ static struct table_instance *table_instance_expand(struct table_instance *ti,
return table_instance_rehash(ti, ti->n_buckets * 2, ufid); return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
} }
static void tbl_mask_array_delete_mask(struct mask_array *ma, static void tbl_mask_array_del_mask(struct flow_table *tbl,
struct sw_flow_mask *mask) struct sw_flow_mask *mask)
{ {
int i; struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int i, ma_count = READ_ONCE(ma->count);
/* Remove the deleted mask pointers from the array */ /* Remove the deleted mask pointers from the array */
for (i = 0; i < ma->max; i++) { for (i = 0; i < ma_count; i++) {
if (mask == ovsl_dereference(ma->masks[i])) { if (mask == ovsl_dereference(ma->masks[i]))
RCU_INIT_POINTER(ma->masks[i], NULL); goto found;
ma->count--;
kfree_rcu(mask, rcu);
return;
}
} }
BUG(); BUG();
return;
found:
WRITE_ONCE(ma->count, ma_count -1);
rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
kfree_rcu(mask, rcu);
/* Shrink the mask array if necessary. */
if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
ma_count <= (ma->max / 3))
tbl_mask_array_realloc(tbl, ma->max / 2);
} }
/* Remove 'mask' from the mask list, if it is not needed any more. */ /* Remove 'mask' from the mask list, if it is not needed any more. */
...@@ -733,17 +744,8 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) ...@@ -733,17 +744,8 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
BUG_ON(!mask->ref_count); BUG_ON(!mask->ref_count);
mask->ref_count--; mask->ref_count--;
if (!mask->ref_count) { if (!mask->ref_count)
struct mask_array *ma; tbl_mask_array_del_mask(tbl, mask);
ma = ovsl_dereference(tbl->mask_array);
tbl_mask_array_delete_mask(ma, mask);
/* Shrink the mask array if necessary. */
if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
ma->count <= (ma->max / 3))
tbl_mask_array_realloc(tbl, ma->max / 2);
}
} }
} }
...@@ -807,6 +809,29 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl, ...@@ -807,6 +809,29 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
return NULL; return NULL;
} }
static int tbl_mask_array_add_mask(struct flow_table *tbl,
struct sw_flow_mask *new)
{
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int err, ma_count = READ_ONCE(ma->count);
if (ma_count >= ma->max) {
err = tbl_mask_array_realloc(tbl, ma->max +
MASK_ARRAY_SIZE_MIN);
if (err)
return err;
ma = ovsl_dereference(tbl->mask_array);
}
BUG_ON(ovsl_dereference(ma->masks[ma_count]));
rcu_assign_pointer(ma->masks[ma_count], new);
WRITE_ONCE(ma->count, ma_count +1);
return 0;
}
/* Add 'mask' into the mask list, if it is not already there. */ /* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
const struct sw_flow_mask *new) const struct sw_flow_mask *new)
...@@ -815,9 +840,6 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, ...@@ -815,9 +840,6 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask = flow_mask_find(tbl, new); mask = flow_mask_find(tbl, new);
if (!mask) { if (!mask) {
struct mask_array *ma;
int i;
/* Allocate a new mask if none exsits. */ /* Allocate a new mask if none exsits. */
mask = mask_alloc(); mask = mask_alloc();
if (!mask) if (!mask)
...@@ -826,29 +848,9 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, ...@@ -826,29 +848,9 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask->range = new->range; mask->range = new->range;
/* Add mask to mask-list. */ /* Add mask to mask-list. */
ma = ovsl_dereference(tbl->mask_array); if (tbl_mask_array_add_mask(tbl, mask)) {
if (ma->count >= ma->max) { kfree(mask);
int err; return -ENOMEM;
err = tbl_mask_array_realloc(tbl, ma->max +
MASK_ARRAY_SIZE_MIN);
if (err) {
kfree(mask);
return err;
}
ma = ovsl_dereference(tbl->mask_array);
}
for (i = 0; i < ma->max; i++) {
const struct sw_flow_mask *t;
t = ovsl_dereference(ma->masks[i]);
if (!t) {
rcu_assign_pointer(ma->masks[i], mask);
ma->count++;
break;
}
} }
} else { } else {
BUG_ON(!mask->ref_count); BUG_ON(!mask->ref_count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment