Commit c70ed3f7 authored by David S. Miller

Merge branch 'net-openvswitch-improve-the-codes'

Tonghao Zhang says:

====================
net: openvswitch: improve the codes

These patches are not bug fixes; they just improve the code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 34e1ec31 e6896163
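
The series touches net/openvswitch/actions.c, datapath.c, flow_table.c, flow_table.h and vport.c. Most hunks are line-wrapping and whitespace fixes; the one behavioral cleanup is in flow_table.c, where the "bool count" parameter of table_instance_flow_free() and the keep_flows flag of struct table_instance are dropped, so every flow removal updates the table counters unconditionally. As a sketch assembled from the hunks below (illustrative, not a substitute for the applied patch), the helper ends up as:

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	/* Unlink from the main table; the counter now always follows. */
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	/* Flows with a unique flow ID are also linked in the UFID table. */
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

The flush path, which previously passed count=false and relied on keep_flows, instead lets table_instance_flow_flush() WARN and re-zero the counters if they are non-zero once all flows have been freed.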
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -741,7 +741,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
 
-static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int ovs_vport_output(struct net *net, struct sock *sk,
+			    struct sk_buff *skb)
 {
 	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 	struct vport *vport = data->vport;
@@ -920,7 +921,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 	upcall.mru = OVS_CB(skb)->mru;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
-		 a = nla_next(a, &rem)) {
+	     a = nla_next(a, &rem)) {
 		switch (nla_type(a)) {
 		case OVS_USERSPACE_ATTR_USERDATA:
 			upcall.userdata = a;
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -182,7 +182,7 @@ struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
 	head = vport_hash_bucket(dp, port_no);
 	hlist_for_each_entry_rcu(vport, head, dp_hash_node,
-				lockdep_ovsl_is_held()) {
+				 lockdep_ovsl_is_held()) {
 		if (vport->port_no == port_no)
 			return vport;
 	}
@@ -254,7 +254,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	error = ovs_execute_actions(dp, skb, sf_acts, key);
 	if (unlikely(error))
 		net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
-							ovs_dp_name(dp), error);
+				    ovs_dp_name(dp), error);
 
 	stats_counter = &stats->n_hit;
@@ -302,7 +302,7 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 			     const struct sw_flow_key *key,
 			     const struct dp_upcall_info *upcall_info,
-				 uint32_t cutlen)
+			     uint32_t cutlen)
 {
 	unsigned int gso_type = skb_shinfo(skb)->gso_type;
 	struct sw_flow_key later_key;
@@ -1080,11 +1080,12 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 }
 
 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
-								   const struct nlattr *a,
-								   const struct sw_flow_key *key,
-								   const struct sw_flow_mask *mask,
-								   bool log)
+static noinline_for_stack
+struct sw_flow_actions *get_flow_actions(struct net *net,
+					 const struct nlattr *a,
+					 const struct sw_flow_key *key,
+					 const struct sw_flow_mask *mask,
+					 bool log)
 {
 	struct sw_flow_actions *acts;
 	struct sw_flow_key masked_key;
@@ -1383,7 +1384,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		ovs_notify(&dp_flow_genl_family, reply, info);
 	} else {
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
+		netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
+				PTR_ERR(reply));
 	}
 }
@@ -1513,7 +1515,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
-				   flags, cmd);
+				 flags, cmd);
 	if (!ovs_header)
 		goto error;
@@ -1572,11 +1574,13 @@ static struct datapath *lookup_datapath(struct net *net,
 	return dp ? dp : ERR_PTR(-ENODEV);
 }
 
-static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
+static void ovs_dp_reset_user_features(struct sk_buff *skb,
+				       struct genl_info *info)
 {
 	struct datapath *dp;
 
-	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
+			     info->attrs);
 	if (IS_ERR(dp))
 		return;
@@ -2075,7 +2079,7 @@ static unsigned int ovs_get_max_headroom(struct datapath *dp)
 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
-					lockdep_ovsl_is_held()) {
+					 lockdep_ovsl_is_held()) {
 			dev = vport->dev;
 			dev_headroom = netdev_get_fwd_headroom(dev);
 			if (dev_headroom > max_headroom)
@@ -2093,10 +2097,11 @@ static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
 	int i;
 
 	dp->max_headroom = new_headroom;
-	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
 					 lockdep_ovsl_is_held())
 			netdev_set_rx_headroom(vport->dev, new_headroom);
+	}
 }
 
 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
@@ -2551,7 +2556,8 @@ static int __init dp_init(void)
 {
 	int err;
 
-	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof_field(struct sk_buff, cb));
+	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
+		     sizeof_field(struct sk_buff, cb));
 
 	pr_info("Open vSwitch switching datapath\n");
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -111,12 +111,16 @@ static void flow_free(struct sw_flow *flow)
 	if (ovs_identifier_is_key(&flow->id))
 		kfree(flow->id.unmasked_key);
 	if (flow->sf_acts)
-		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
+		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
+					  flow->sf_acts);
+
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+	for (cpu = 0; cpu < nr_cpu_ids;
+	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
 					(struct sw_flow_stats __force *)flow->stats[cpu]);
+	}
+
 	kmem_cache_free(flow_cache, flow);
 }
@@ -164,7 +168,6 @@ static struct table_instance *table_instance_alloc(int new_size)
 	ti->n_buckets = new_size;
 	ti->node_ver = 0;
-	ti->keep_flows = false;
 	get_random_bytes(&ti->hash_seed, sizeof(u32));
 
 	return ti;
@@ -192,7 +195,7 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
 	 * zero based counter we store the value at reset, and subtract it
 	 * later when processing.
 	 */
-	for (i = 0; i < ma->max; i++)  {
+	for (i = 0; i < ma->max; i++) {
 		ma->masks_usage_zero_cntr[i] = 0;
 
 		for_each_possible_cpu(cpu) {
@@ -273,7 +276,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
 	if (ma_count >= ma->max) {
 		err = tbl_mask_array_realloc(tbl, ma->max +
-						MASK_ARRAY_SIZE_MIN);
+					     MASK_ARRAY_SIZE_MIN);
 		if (err)
 			return err;
@@ -288,7 +291,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
 	BUG_ON(ovsl_dereference(ma->masks[ma_count]));
 	rcu_assign_pointer(ma->masks[ma_count], new);
-	WRITE_ONCE(ma->count, ma_count +1);
+	WRITE_ONCE(ma->count, ma_count + 1);
 
 	return 0;
 }
@@ -309,10 +312,10 @@ static void tbl_mask_array_del_mask(struct flow_table *tbl,
 	return;
 
 found:
-	WRITE_ONCE(ma->count, ma_count -1);
-	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
-	RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
+	WRITE_ONCE(ma->count, ma_count - 1);
+	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
 
 	kfree_rcu(mask, rcu);
@@ -448,26 +451,23 @@ int ovs_flow_tbl_init(struct flow_table *table)
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
-	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+	struct table_instance *ti;
 
+	ti = container_of(rcu, struct table_instance, rcu);
 	__table_instance_destroy(ti);
 }
 
 static void table_instance_flow_free(struct flow_table *table,
 				     struct table_instance *ti,
 				     struct table_instance *ufid_ti,
-				     struct sw_flow *flow,
-				     bool count)
+				     struct sw_flow *flow)
 {
 	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
-	if (count)
-		table->count--;
+	table->count--;
 
 	if (ovs_identifier_is_ufid(&flow->id)) {
 		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
-
-		if (count)
-			table->ufid_count--;
+		table->ufid_count--;
 	}
 
 	flow_mask_remove(table, flow->mask);
@@ -480,22 +480,25 @@ void table_instance_flow_flush(struct flow_table *table,
 {
 	int i;
 
-	if (ti->keep_flows)
-		return;
-
 	for (i = 0; i < ti->n_buckets; i++) {
-		struct sw_flow *flow;
 		struct hlist_head *head = &ti->buckets[i];
 		struct hlist_node *n;
+		struct sw_flow *flow;
 
 		hlist_for_each_entry_safe(flow, n, head,
 					  flow_table.node[ti->node_ver]) {
 			table_instance_flow_free(table, ti, ufid_ti,
-						 flow, false);
+						 flow);
 			ovs_flow_free(flow, true);
 		}
 	}
+
+	if (WARN_ON(table->count != 0 ||
+		    table->ufid_count != 0)) {
+		table->count = 0;
+		table->ufid_count = 0;
+	}
 }
 
 static void table_instance_destroy(struct table_instance *ti,
@@ -596,8 +599,6 @@ static void flow_table_copy_flows(struct table_instance *old,
 					  lockdep_ovsl_is_held())
 			table_instance_insert(new, flow);
 	}
-
-	old->keep_flows = true;
 }
 
 static struct table_instance *table_instance_rehash(struct table_instance *ti,
@@ -632,8 +633,6 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 	rcu_assign_pointer(flow_table->ti, new_ti);
 	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
 	flow_table->last_rehash = jiffies;
-	flow_table->count = 0;
-	flow_table->ufid_count = 0;
 
 	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
 	table_instance_destroy(old_ti, old_ufid_ti);
@@ -661,7 +660,7 @@ static int flow_key_start(const struct sw_flow_key *key)
 		return 0;
 	else
 		return rounddown(offsetof(struct sw_flow_key, phy),
-					  sizeof(long));
+				 sizeof(long));
 }
 
 static bool cmp_key(const struct sw_flow_key *key1,
@@ -673,7 +672,7 @@ static bool cmp_key(const struct sw_flow_key *key1,
 	long diffs = 0;
 	int i;
 
-	for (i = key_start; i < key_end;  i += sizeof(long))
+	for (i = key_start; i < key_end; i += sizeof(long))
 		diffs |= *cp1++ ^ *cp2++;
 
 	return diffs == 0;
@@ -713,7 +712,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	(*n_mask_hit)++;
 
 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
-				lockdep_ovsl_is_held()) {
+				 lockdep_ovsl_is_held()) {
 		if (flow->mask == mask && flow->flow_table.hash == hash &&
 		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
 			return flow;
@@ -897,7 +896,8 @@ static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
 	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
 }
 
-bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+bool ovs_flow_cmp(const struct sw_flow *flow,
+		  const struct sw_flow_match *match)
 {
 	if (ovs_identifier_is_ufid(&flow->id))
 		return flow_cmp_masked_key(flow, match->key, &match->range);
@@ -916,7 +916,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
 	hash = ufid_hash(ufid);
 	head = find_bucket(ti, hash);
 	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
-				lockdep_ovsl_is_held()) {
+				 lockdep_ovsl_is_held()) {
 		if (flow->ufid_table.hash == hash &&
 		    ovs_flow_cmp_ufid(flow, ufid))
 			return flow;
@@ -950,7 +950,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
 
 	BUG_ON(table->count == 0);
-	table_instance_flow_free(table, ti, ufid_ti, flow, true);
+	table_instance_flow_free(table, ti, ufid_ti, flow);
 }
 
 static struct sw_flow_mask *mask_alloc(void)
@@ -1107,7 +1107,7 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
 	if (!masks_and_count)
 		return;
 
-	for (i = 0; i < ma->max; i++)  {
+	for (i = 0; i < ma->max; i++) {
 		struct sw_flow_mask *mask;
 		unsigned int start;
 		int cpu;
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -53,7 +53,6 @@ struct table_instance {
 	struct rcu_head rcu;
 	int node_ver;
 	u32 hash_seed;
-	bool keep_flows;
 };
 
 struct flow_table {
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -98,7 +98,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
 	struct vport *vport;
 
 	hlist_for_each_entry_rcu(vport, bucket, hash_node,
-				lockdep_ovsl_is_held())
+				 lockdep_ovsl_is_held())
 		if (!strcmp(name, ovs_vport_name(vport)) &&
 		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
@@ -118,7 +118,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
  * vport_free().
  */
 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
-			  const struct vport_parms *parms)
+			      const struct vport_parms *parms)
 {
 	struct vport *vport;
 	size_t alloc_size;
@@ -397,7 +397,8 @@ int ovs_vport_get_upcall_portids(const struct vport *vport,
  *
  * Returns the portid of the target socket. Must be called with rcu_read_lock.
  */
-u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
+u32 ovs_vport_find_upcall_portid(const struct vport *vport,
+				 struct sk_buff *skb)
 {
 	struct vport_portids *ids;
 	u32 ids_index;