Commit b124f413 authored by David S. Miller

Merge tag 'batadv-net-for-davem-20170316' of git://git.open-mesh.org/linux-merge

Simon Wunderlich says:

====================
Here are two batman-adv bugfixes:

 - Keep fragments equally sized, which avoids problems caused by too-small
   fragments, by Sven Eckelmann

 - Initialize gateway class correctly when BATMAN V is compiled in,
   by Sven Eckelmann
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8f3dbfd7 1a9070ec
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 	batadv_iv_ogm_schedule(hard_iface);
 }
 
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+	/* set default TQ difference threshold to 20 */
+	atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
 static struct batadv_gw_node *
 batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 		.del_if = batadv_iv_ogm_orig_del_if,
 	},
 	.gw = {
+		.init_sel_class = batadv_iv_init_sel_class,
 		.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
 		.is_eligible = batadv_iv_gw_is_eligible,
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -668,6 +668,16 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 	return ret;
 }
 
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+	/* set default throughput difference threshold to 5Mbps */
+	atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
 static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
 					char *buff, size_t count)
 {
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 		.dump = batadv_v_orig_dump,
 	},
 	.gw = {
+		.init_sel_class = batadv_v_init_sel_class,
 		.store_sel_class = batadv_v_store_sel_class,
 		.show_sel_class = batadv_v_show_sel_class,
 		.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 	if (ret < 0)
 		return ret;
 
-	/* set default throughput difference threshold to 5Mbps */
-	atomic_set(&bat_priv->gw.sel_class, 50);
-
 	return 0;
 }
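The reason this default cannot live in common code: gw.sel_class is shared state, but each algorithm reads it in different units. B.A.T.M.A.N. IV treats it as a TQ difference threshold (default 20), while B.A.T.M.A.N. V treats it as a throughput difference in 100 kbit/s steps (default 50, i.e. 5 Mbps), as the in-tree comments above state. A minimal user-space sketch of that unit difference only (illustrative, not kernel code; the actual comparison logic lives elsewhere in gw.c):

#include <stdio.h>

int main(void)
{
	/* Defaults from the hunks above; the unit interpretation follows
	 * the in-tree comments ("TQ difference" vs. "5Mbps" for 50).
	 */
	unsigned int iv_sel_class = 20; /* B.A.T.M.A.N. IV: TQ difference */
	unsigned int v_sel_class = 50;  /* B.A.T.M.A.N. V: 100 kbit/s units */

	printf("IV: prefer a new gateway on a TQ difference > %u\n",
	       iv_sel_class);
	printf("V:  prefer a new gateway on a throughput difference > %u kbit/s\n",
	       v_sel_class * 100); /* 50 * 100 kbit/s = 5 Mbps */
	return 0;
}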
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -404,7 +404,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
  * batadv_frag_create - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
  *
  * Split the passed skb into two fragments: A new one with size matching the
  * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
 					  struct batadv_frag_packet *frag_head,
-					  unsigned int mtu)
+					  unsigned int fragment_size)
 {
 	struct sk_buff *skb_fragment;
 	unsigned int header_size = sizeof(*frag_head);
-	unsigned int fragment_size = mtu - header_size;
+	unsigned int mtu = fragment_size + header_size;
 
 	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
 	if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	struct sk_buff *skb_fragment;
 	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
 	unsigned int header_size = sizeof(frag_header);
-	unsigned int max_fragment_size, max_packet_size;
+	unsigned int max_fragment_size, num_fragments;
 	int ret;
 
 	/* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	 */
 	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
 	max_fragment_size = mtu - header_size;
-	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+	if (skb->len == 0 || max_fragment_size == 0)
+		return -EINVAL;
+
+	num_fragments = (skb->len - 1) / max_fragment_size + 1;
+	max_fragment_size = (skb->len - 1) / num_fragments + 1;
 
 	/* Don't even try to fragment, if we need more than 16 fragments */
-	if (skb->len > max_packet_size) {
+	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
 		ret = -EAGAIN;
 		goto free_skb;
 	}
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 		goto put_primary_if;
 	}
 
-	skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+	skb_fragment = batadv_frag_create(skb, &frag_header,
+					  max_fragment_size);
 	if (!skb_fragment) {
 		ret = -ENOMEM;
 		goto put_primary_if;
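The rebalancing arithmetic above is easiest to verify with concrete numbers: ceiling division first sizes the payload in fragments at the maximum size, then shrinks the per-fragment size so the split comes out (nearly) equal. A 3000 byte payload with 1400 bytes of room per fragment used to be split 1400/1400/200 and now becomes 1000/1000/1000. A standalone sketch of just this arithmetic (the 3000/1400 figures are made-up example inputs, not values from the patch):

#include <stdio.h>

/* Rebalance fragment sizes as in the hunk above: first compute how many
 * fragments are needed at the maximum size (ceiling division), then
 * shrink the per-fragment size so the payload splits evenly.
 */
static unsigned int rebalance(unsigned int skb_len,
			      unsigned int max_fragment_size,
			      unsigned int *num_fragments)
{
	*num_fragments = (skb_len - 1) / max_fragment_size + 1;
	return (skb_len - 1) / *num_fragments + 1;
}

int main(void)
{
	unsigned int num_fragments;
	/* Hypothetical inputs: 3000 byte payload, 1400 bytes per fragment */
	unsigned int size = rebalance(3000, 1400, &num_fragments);

	/* Prints: 3 fragments of at most 1000 bytes (was 1400/1400/200) */
	printf("%u fragments of at most %u bytes\n", num_fragments, size);
	return 0;
}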
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
 {
+	if (bat_priv->algo_ops->gw.init_sel_class)
+		bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+	else
+		atomic_set(&bat_priv->gw.sel_class, 1);
+
 	batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
 				     NULL, BATADV_TVLV_GW, 1,
 				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
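The dispatch added to batadv_gw_init() is the usual optional-callback pattern: use the algorithm's hook when present, otherwise fall back to a default (the fallback value 1 mirrors the hunk above). A self-contained sketch of the pattern, with hypothetical names and plain integers instead of kernel atomics:

#include <stdio.h>

struct gw_ops {
	/* optional hook; may be left NULL by an algorithm */
	void (*init_sel_class)(unsigned int *sel_class);
};

static void demo_init_sel_class(unsigned int *sel_class)
{
	*sel_class = 20; /* algorithm-specific default */
}

static void gw_init(const struct gw_ops *ops, unsigned int *sel_class)
{
	if (ops->init_sel_class)
		ops->init_sel_class(sel_class);
	else
		*sel_class = 1; /* fallback, as in the hunk above */
}

int main(void)
{
	unsigned int sel_class;
	struct gw_ops with = { .init_sel_class = demo_init_sel_class };
	struct gw_ops without = { 0 };

	gw_init(&with, &sel_class);
	printf("with callback: %u\n", sel_class);    /* 20 */
	gw_init(&without, &sel_class);
	printf("without callback: %u\n", sel_class); /* 1 */
	return 0;
}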
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
 	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
 #endif
 	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
-	atomic_set(&bat_priv->gw.sel_class, 20);
 	atomic_set(&bat_priv->gw.bandwidth_down, 100);
 	atomic_set(&bat_priv->gw.bandwidth_up, 20);
 	atomic_set(&bat_priv->orig_interval, 1000);
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
  * @store_sel_class: parse and stores a new GW selection class (optional)
  * @show_sel_class: prints the current GW selection class (optional)
  * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
  * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+	void (*init_sel_class)(struct batadv_priv *bat_priv);
 	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
 				   size_t count);
 	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);