Commit 9d2f627b authored by Eelco Chaudron, committed by David S. Miller

net: openvswitch: add masks cache hit counter

Add a counter that counts the number of masks cache hits, and
export it through the megaflow netlink statistics.
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d6526926
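
The new field lands in struct ovs_dp_megaflow_stats, which the kernel reports alongside n_mask_hit in the OVS_DP_ATTR_MEGAFLOW_STATS netlink attribute. As a rough, hedged illustration of how the counter can be interpreted, the user-space sketch below mirrors the updated struct layout and derives a cache hit rate; the struct name, helper name, and sample numbers are hypothetical, and fetching the attribute over generic netlink is omitted.

/*
 * Illustrative only: a user-space sketch of how a consumer of the
 * OVS_DP_ATTR_MEGAFLOW_STATS attribute might use the new counter.
 * ovs_megaflow_stats_view and print_mask_cache_stats are hypothetical
 * names; the struct simply mirrors the updated uapi layout below.
 */
#include <stdint.h>
#include <stdio.h>

struct ovs_megaflow_stats_view {
        uint64_t n_mask_hit;    /* masks walked during flow lookups */
        uint32_t n_masks;       /* masks installed in the datapath */
        uint32_t pad0;
        uint64_t n_cache_hit;   /* new: lookups served by the mask cache */
        uint64_t pad1;
};

static void print_mask_cache_stats(const struct ovs_megaflow_stats_view *ms,
                                   uint64_t n_hit, uint64_t n_missed)
{
        uint64_t pkts = n_hit + n_missed;   /* packets seen by the datapath */

        if (!pkts)
                return;
        printf("masks/packet: %.2f, mask cache hit rate: %.1f%%\n",
               (double)ms->n_mask_hit / pkts,
               100.0 * (double)ms->n_cache_hit / pkts);
}

int main(void)
{
        /* Made-up sample numbers, only to show the arithmetic. */
        struct ovs_megaflow_stats_view ms = {
                .n_mask_hit = 3500, .n_masks = 4, .n_cache_hit = 900,
        };

        print_mask_cache_stats(&ms, 950, 50);
        return 0;
}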
@@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats {
 	__u64 n_mask_hit;	 /* Number of masks used for flow lookups. */
 	__u32 n_masks;		 /* Number of masks for the datapath. */
 	__u32 pad0;		 /* Pad for future expension. */
+	__u64 n_cache_hit;	 /* Number of cache matches for flow lookups. */
 	__u64 pad1;		 /* Pad for future expension. */
-	__u64 pad2;		 /* Pad for future expension. */
 };
 
 struct ovs_vport_stats {
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	struct dp_stats_percpu *stats;
 	u64 *stats_counter;
 	u32 n_mask_hit;
+	u32 n_cache_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
 
 	/* Look up flow. */
 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
-					 &n_mask_hit);
+					 &n_mask_hit, &n_cache_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	u64_stats_update_begin(&stats->syncp);
 	(*stats_counter)++;
 	stats->n_mask_hit += n_mask_hit;
+	stats->n_cache_hit += n_cache_hit;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
+		mega_stats->n_cache_hit += local_stats.n_cache_hit;
 	}
 }
 
@@ -38,12 +38,15 @@
  * @n_mask_hit: Number of masks looked up for flow match.
  *   @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
  *   up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ *               the mask cache.
  */
 struct dp_stats_percpu {
 	u64 n_hit;
 	u64 n_missed;
 	u64 n_lost;
 	u64 n_mask_hit;
+	u64 n_cache_hit;
 	struct u64_stats_sync syncp;
 };
 
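To put the documented ratios in concrete (made-up) terms: if a CPU has seen n_hit + n_missed = 10000 packets while n_mask_hit = 25000 and n_cache_hit = 7500, then lookups walked 2.5 masks per packet on average, and 75% of the packets found their mask through the cache rather than by scanning the mask array.
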
@@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
+				   u32 *n_cache_hit,
 				   u32 *index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 			u64_stats_update_begin(&ma->syncp);
 			usage_counters[*index]++;
 			u64_stats_update_end(&ma->syncp);
+			(*n_cache_hit)++;
 			return flow;
 		}
 	}
@@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  const struct sw_flow_key *key,
 					  u32 skb_hash,
-					  u32 *n_mask_hit)
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit)
 {
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
@@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
+	*n_cache_hit = 0;
 	if (unlikely(!skb_hash)) {
 		u32 mask_index = 0;
+		u32 cache = 0;
 
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+				   &mask_index);
 	}
 
 	/* Pre and post recirulation flows usually have the same skb_hash
@@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
-					   &e->mask_index);
+					   n_cache_hit, &e->mask_index);
 			if (!flow)
 				e->skb_hash = 0;
 			return flow;
@@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	}
 
 	/* Cache miss, do full lookup. */
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+			   &ce->mask_index);
 	if (flow)
 		ce->skb_hash = skb_hash;
 
+	*n_cache_hit = 0;
 	return flow;
 }
 
@@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused n_cache_hit;
 	u32 index = 0;
 
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
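For readers unfamiliar with the mask cache being instrumented, the self-contained toy model below (not the kernel code) shows a simplified skb_hash-indexed cache and where the two counters advance relative to each other: trying the cached mask costs one mask lookup and, if it matches, also counts as a cache hit; a miss falls back to walking every mask. Every name prefixed with "toy_" is hypothetical.

/*
 * Toy model of the skb_hash-indexed mask cache instrumented above.
 * Illustrative only; all "toy_" identifiers are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CACHE_SIZE  256     /* power of two, indexed by skb_hash */
#define TOY_NUM_MASKS   4

struct toy_cache_entry {
        uint32_t skb_hash;      /* hash of the flow that filled the entry */
        uint32_t mask_index;    /* mask that matched that flow last time */
};

static struct toy_cache_entry toy_cache[TOY_CACHE_SIZE];

/* Stand-in for a masked flow-table lookup: does mask @i match this packet? */
static bool toy_mask_matches(uint32_t pkt_key, uint32_t i)
{
        return (pkt_key % TOY_NUM_MASKS) == i;
}

static bool toy_lookup(uint32_t skb_hash, uint32_t pkt_key,
                       uint32_t *n_mask_hit, uint32_t *n_cache_hit)
{
        struct toy_cache_entry *e = &toy_cache[skb_hash & (TOY_CACHE_SIZE - 1)];
        uint32_t i;

        if (e->skb_hash == skb_hash) {
                /* The cached mask is tried first; one mask lookup, and if it
                 * matches this also counts as a cache hit. */
                (*n_mask_hit)++;
                if (toy_mask_matches(pkt_key, e->mask_index)) {
                        (*n_cache_hit)++;
                        return true;
                }
                e->skb_hash = 0;        /* stale entry, drop it */
        }

        /* Cache miss: walk all masks and remember the one that matched. */
        for (i = 0; i < TOY_NUM_MASKS; i++) {
                (*n_mask_hit)++;
                if (toy_mask_matches(pkt_key, i)) {
                        e->skb_hash = skb_hash;
                        e->mask_index = i;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        uint32_t n_mask_hit = 0, n_cache_hit = 0;
        uint32_t pkt;

        /* Ten packets of the same flow: the first one misses the cache and
         * walks all four masks, the next nine are cache hits that try only
         * a single mask, so this prints n_mask_hit=13 n_cache_hit=9. */
        for (pkt = 0; pkt < 10; pkt++)
                toy_lookup(0xabcd1234, 3, &n_mask_hit, &n_cache_hit);

        printf("n_mask_hit=%u n_cache_hit=%u\n", n_mask_hit, n_cache_hit);
        return 0;
}
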
@@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 					  const struct sw_flow_key *,
 					  u32 skb_hash,
-					  u32 *n_mask_hit);
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,