Commit aef833c5 authored by Pablo Neira Ayuso, committed by David S. Miller

net: openvswitch: rename flow_stats to sw_flow_stats

There is a flow_stats structure defined in include/net/flow_offload.h
and a follow up patch adds #include <net/flow_offload.h> to
net/sch_generic.h.

This breaks compilation, since the OVS codebase includes net/sock.h, which
pulls in linux/filter.h, which in turn includes net/sch_generic.h.

In file included from ./include/net/sch_generic.h:18:0,
                 from ./include/linux/filter.h:25,
                 from ./include/net/sock.h:59,
                 from ./include/linux/tcp.h:19,
                 from net/openvswitch/datapath.c:24

This definition takes precedence over the OVS one since it is placed in
the networking core, so rename flow_stats in OVS to sw_flow_stats, since
this structure is contained in sw_flow.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9a2f97bb
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			   const struct sk_buff *skb)
 {
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 	unsigned int cpu = smp_processor_id();
 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 	if (likely(flow->stats_last_writer != -1) &&
 	    likely(!rcu_access_pointer(flow->stats[cpu]))) {
 		/* Try to allocate CPU-specific stats. */
-		struct flow_stats *new_stats;
+		struct sw_flow_stats *new_stats;
 		new_stats =
 			kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 		if (stats) {
 			/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 		if (stats) {
 			spin_lock_bh(&stats->lock);
...
@@ -194,7 +194,7 @@ struct sw_flow_actions {
 	struct nlattr actions[];
 };
-struct flow_stats {
+struct sw_flow_stats {
 	u64 packet_count;		/* Number of packets matched. */
 	u64 byte_count;			/* Number of bytes matched. */
 	unsigned long used;		/* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
 	struct cpumask cpu_used_mask;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-	struct flow_stats __rcu *stats[]; /* One for each CPU.  First one
+	struct sw_flow_stats __rcu *stats[]; /* One for each CPU.  First one
					   * is allocated at flow creation time,
					   * the rest are allocated on demand
					   * while holding the 'stats[0].lock'.
...
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
 	if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
-					(struct flow_stats __force *)flow->stats[cpu]);
+					(struct sw_flow_stats __force *)flow->stats[cpu]);
 	kmem_cache_free(flow_cache, flow);
 }
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
 				       + (nr_cpu_ids
-					  * sizeof(struct flow_stats *)),
+					  * sizeof(struct sw_flow_stats *)),
 				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;
 	flow_stats_cache
-		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
 				    0, SLAB_HWCACHE_ALIGN, NULL);
 	if (flow_stats_cache == NULL) {
 		kmem_cache_destroy(flow_cache);
...