Commit aa86b03c authored by David S. Miller

Merge tag 'batadv-net-for-davem-20180919' of git://git.open-mesh.org/linux-merge

Simon Wunderlich says:

====================
pull request for net: batman-adv 2018-09-19

here are some bugfixes which we would like to see integrated into net.

We forgot to bump the version number in the last round for net-next, so
the belated patch to do that is included - we hope you can still adopt it.
This will most likely create a merge conflict later when net is merged
into net-next together with this round's net-next patchset, but net-next
should keep the 2018.4 version [1].

[1] resolution:

--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,11 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"

 #ifndef BATADV_SOURCE_VERSION
-<<<<<<<
-#define BATADV_SOURCE_VERSION "2018.3"
-=======
 #define BATADV_SOURCE_VERSION "2018.4"
->>>>>>>
 #endif

 /* B.A.T.M.A.N. parameters */

Please pull or let me know of any problem!

Here are some batman-adv bugfixes:

 - Avoid ELP information leak, by Sven Eckelmann

 - Fix sysfs segfault issues, by Sven Eckelmann (2 patches)

 - Fix locking when adding entries in various lists,
   by Sven Eckelmann (5 patches, see the first sketch below)

 - Fix refcount if queue_work() fails, by Marek Lindner
   (2 patches, see the second sketch below)

 - Fixup forgotten version bump, by Sven Eckelmann
====================
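
All of the list locking fixes in this series follow the same pattern: the
lookup that decides whether an entry already exists and the insertion of a
new entry have to happen under the same lock, otherwise two concurrent
callers can both miss the lookup and add a duplicate. Below is a minimal
sketch of that pattern, using a hypothetical entry type and list (my_entry,
my_list, my_list_lock) rather than any actual batman-adv structure:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	struct hlist_node list;
	unsigned short vid;
};

/* Look up an entry by vid and create it if it does not exist yet.
 * Lookup and insertion are done under one lock so that a concurrent
 * caller cannot add a duplicate in between.
 */
static struct my_entry *my_entry_get_or_add(struct hlist_head *my_list,
					    spinlock_t *my_list_lock,
					    unsigned short vid)
{
	struct my_entry *entry;

	spin_lock_bh(my_list_lock);

	hlist_for_each_entry(entry, my_list, list) {
		if (entry->vid == vid)
			goto unlock;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto unlock;

	entry->vid = vid;
	hlist_add_head_rcu(&entry->list, my_list);

unlock:
	spin_unlock_bh(my_list_lock);
	return entry;
}

The gateway, network-coding, softif_vlan, translation-table and tvlv hunks
below move their lookup-and-insert sequences into this shape; the
softif_vlan fix additionally drops the lock before calling the sleeping
batadv_sysfs_add_vlan() and relies on the reference count to clean up on
error.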
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 69ba423d dabeb13e
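
The queue_work() fixes address a reference leak in a related pattern: the
caller hands an object to a work item and expects the work function to drop
the reference, but queue_work() returns false when the work was already
queued, in which case the work function runs only once and the extra
reference would never be released. A minimal sketch, again with
hypothetical names (my_object, my_object_put(), my_wq) standing in for the
real batadv_hardif_neigh_put()/batadv_backbone_gw_put() callers:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct my_object {
	struct kref refcount;
	struct work_struct work;	/* work function drops one reference */
};

/* kref_put() wrapper, assumed to exist elsewhere */
void my_object_put(struct my_object *obj);

static void my_object_schedule(struct workqueue_struct *my_wq,
			       struct my_object *obj)
{
	bool ret;

	/* reference that the work function is expected to release */
	kref_get(&obj->refcount);

	ret = queue_work(my_wq, &obj->work);

	/* queue_work() returns false if the work was already pending;
	 * the work function will not run an extra time, so drop the
	 * reference here instead of leaking it
	 */
	if (!ret)
		my_object_put(obj);
}
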
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 	 * the packet to be exactly of that size to make the link
 	 * throughput estimation effective.
 	 */
-	skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+	skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
 	struct batadv_priv *bat_priv;
 	struct sk_buff *skb;
 	u32 elp_interval;
+	bool ret;
 
 	bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
 	hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
 		 * may sleep and that is not allowed in an rcu protected
 		 * context. Therefore schedule a task for that.
 		 */
-		queue_work(batadv_event_workqueue,
-			   &hardif_neigh->bat_v.metric_work);
+		ret = queue_work(batadv_event_workqueue,
+				 &hardif_neigh->bat_v.metric_work);
+
+		if (!ret)
+			batadv_hardif_neigh_put(hardif_neigh);
 	}
 	rcu_read_unlock();
......
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct ethhdr *ethhdr;
+	bool ret;
 
 	ethhdr = eth_hdr(skb);
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 	if (unlikely(!backbone_gw))
 		return true;
 
-	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
-	/* backbone_gw is unreferenced in the report work function function */
+	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+	/* backbone_gw is unreferenced in the report work function function
+	 * if queue_work() call was successful
+	 */
+	if (!ret)
+		batadv_backbone_gw_put(backbone_gw);
 
 	return true;
 }
......
@@ -32,6 +32,7 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -348,6 +349,9 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
  */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 {
 	struct batadv_gw_node *gw_node;
 
+	lockdep_assert_held(&bat_priv->gw.list_lock);
+
 	if (gateway->bandwidth_down == 0)
 		return;
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
 	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-	spin_lock_bh(&bat_priv->gw.list_lock);
 	kref_get(&gw_node->refcount);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
-	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 {
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
 
+	spin_lock_bh(&bat_priv->gw.list_lock);
 	gw_node = batadv_gw_node_get(bat_priv, orig_node);
 	if (!gw_node) {
 		batadv_gw_node_add(bat_priv, orig_node, gateway);
+		spin_unlock_bh(&bat_priv->gw.list_lock);
 		goto out;
 	}
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
 	    gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
......
@@ -25,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
......
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 	spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
 	struct list_head *list;
 
+	/* Select ingoing or outgoing coding node */
+	if (in_coding) {
+		lock = &orig_neigh_node->in_coding_list_lock;
+		list = &orig_neigh_node->in_coding_list;
+	} else {
+		lock = &orig_neigh_node->out_coding_list_lock;
+		list = &orig_neigh_node->out_coding_list;
+	}
+
+	spin_lock_bh(lock);
+
 	/* Check if nc_node is already added */
 	nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
 
 	/* Node found */
 	if (nc_node)
-		return nc_node;
+		goto unlock;
 
 	nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
 	if (!nc_node)
-		return NULL;
+		goto unlock;
 
 	/* Initialize nc_node */
 	INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 	kref_get(&orig_neigh_node->refcount);
 	nc_node->orig_node = orig_neigh_node;
 
-	/* Select ingoing or outgoing coding node */
-	if (in_coding) {
-		lock = &orig_neigh_node->in_coding_list_lock;
-		list = &orig_neigh_node->in_coding_list;
-	} else {
-		lock = &orig_neigh_node->out_coding_list_lock;
-		list = &orig_neigh_node->out_coding_list;
-	}
-
 	batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
 		   nc_node->addr, nc_node->orig_node->orig);
 
 	/* Add nc_node to orig_node */
-	spin_lock_bh(lock);
 	kref_get(&nc_node->refcount);
 	list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
 	spin_unlock_bh(lock);
 
 	return nc_node;
......
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 	struct batadv_softif_vlan *vlan;
 	int err;
 
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
 	if (vlan) {
 		batadv_softif_vlan_put(vlan);
+		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 		return -EEXIST;
 	}
 
 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
-	if (!vlan)
+	if (!vlan) {
+		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 		return -ENOMEM;
+	}
 
 	vlan->bat_priv = bat_priv;
 	vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 	atomic_set(&vlan->ap_isolation, 0);
 
+	kref_get(&vlan->refcount);
+	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+	/* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+	 * sleeping behavior of the sysfs functions and the fs_reclaim lock
+	 */
 	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
 	if (err) {
-		kfree(vlan);
+		/* ref for the function */
+		batadv_softif_vlan_put(vlan);
+
+		/* ref for the list */
+		batadv_softif_vlan_put(vlan);
 		return err;
 	}
 
-	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-	kref_get(&vlan->refcount);
-	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
 	/* add a new TT local entry. This one will be marked with the NOPURGE
 	 * flag
 	 */
......
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
 									\
 	return __batadv_store_uint_attr(buff, count, _min, _max,	\
 					_post_func, attr,		\
-					&bat_priv->_var, net_dev);	\
+					&bat_priv->_var, net_dev,	\
+					NULL);				\
 }
 
 #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)				\
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
 									\
 	length = __batadv_store_uint_attr(buff, count, _min, _max,	\
 					  _post_func, attr,		\
-					  &hard_iface->_var, net_dev);	\
+					  &hard_iface->_var,		\
+					  hard_iface->soft_iface,	\
+					  net_dev);			\
 									\
 	batadv_hardif_put(hard_iface);					\
 	return length;							\
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
 static int batadv_store_uint_attr(const char *buff, size_t count,
 				  struct net_device *net_dev,
+				  struct net_device *slave_dev,
 				  const char *attr_name,
 				  unsigned int min, unsigned int max,
 				  atomic_t *attr)
 {
+	char ifname[IFNAMSIZ + 3] = "";
 	unsigned long uint_val;
 	int ret;
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
 	if (atomic_read(attr) == uint_val)
 		return count;
 
-	batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
-		    attr_name, atomic_read(attr), uint_val);
+	if (slave_dev)
+		snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+	batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+		    attr_name, ifname, atomic_read(attr), uint_val);
 
 	atomic_set(attr, uint_val);
 	return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
 					void (*post_func)(struct net_device *),
 					const struct attribute *attr,
 					atomic_t *attr_store,
-					struct net_device *net_dev)
+					struct net_device *net_dev,
+					struct net_device *slave_dev)
 {
 	int ret;
 
-	ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
-				     attr_store);
+	ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+				     attr->name, min, max, attr_store);
 	if (post_func && ret)
 		post_func(net_dev);
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
 	return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
 					batadv_post_gw_reselect, attr,
 					&bat_priv->gw.sel_class,
-					bat_priv->soft_iface);
+					bat_priv->soft_iface, NULL);
 }
 
 static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 	if (old_tp_override == tp_override)
 		goto out;
 
-	batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
-		    "throughput_override",
+	batadv_info(hard_iface->soft_iface,
+		    "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+		    "throughput_override", net_dev->name,
 		    old_tp_override / 10, old_tp_override % 10,
 		    tp_override / 10, tp_override % 10);
......
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 {
 	struct batadv_tt_orig_list_entry *orig_entry;
 
+	spin_lock_bh(&tt_global->list_lock);
+
 	orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
 	if (orig_entry) {
 		/* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 	orig_entry->flags = flags;
 	kref_init(&orig_entry->refcount);
 
-	spin_lock_bh(&tt_global->list_lock);
 	kref_get(&orig_entry->refcount);
 	hlist_add_head_rcu(&orig_entry->list,
 			   &tt_global->orig_list);
-	spin_unlock_bh(&tt_global->list_lock);
 	atomic_inc(&tt_global->orig_list_count);
 
 sync_flags:
@@ -1647,6 +1647,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 out:
 	if (orig_entry)
 		batadv_tt_orig_list_entry_put(orig_entry);
+
+	spin_unlock_bh(&tt_global->list_lock);
 }
/**
......
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
+	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
 	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
 	if (tvlv_handler) {
+		spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
 		batadv_tvlv_handler_put(tvlv_handler);
 		return;
 	}
 
 	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
-	if (!tvlv_handler)
+	if (!tvlv_handler) {
+		spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
 		return;
+	}
 
 	tvlv_handler->ogm_handler = optr;
 	tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 	kref_init(&tvlv_handler->refcount);
 	INIT_HLIST_NODE(&tvlv_handler->list);
 
-	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
 	kref_get(&tvlv_handler->refcount);
 	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
 	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
......