Commit 8c70f138 authored by Marek Lindner, committed by Greg Kroah-Hartman

Staging: batman-adv: multiple mesh clouds

This patch removes all remaining global variables and includes the
necessary bits into the bat_priv structure. It is the last
remaining piece to allow multiple concurrent mesh clouds on the
same device.
A few global variables have been rendered obsolete during the process
and have been removed entirely.
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
[sven.eckelmann@gmx.de: Rework on top of current version]
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 6a0e9fa8
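
The pattern applied throughout the diff below is the usual kernel one: state that used to live in file-scope globals (orig_hash, forw_bat_list, their spinlocks, module_state, and so on) is moved into the per-device bat_priv structure reached via netdev_priv(soft_iface), so every soft interface / mesh cloud carries its own copy. The following is only a rough sketch of that pattern; the struct and function names (demo_priv, demo_mesh_init, demo_queue_packet) are simplified placeholders, not the real batman-adv symbols.

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/atomic.h>

    /* illustrative per-device private data, analogous to bat_priv */
    struct demo_priv {
            atomic_t mesh_state;          /* was: global module_state         */
            spinlock_t queue_lock;        /* was: global DEFINE_SPINLOCK(...) */
            struct hlist_head queue;      /* was: global struct hlist_head    */
    };

    static int demo_mesh_init(struct net_device *soft_iface)
    {
            /* each soft interface owns its own private area, so several
             * mesh clouds can coexist without sharing any state */
            struct demo_priv *priv = netdev_priv(soft_iface);

            spin_lock_init(&priv->queue_lock);
            INIT_HLIST_HEAD(&priv->queue);
            atomic_set(&priv->mesh_state, 0);
            return 0;
    }

    static void demo_queue_packet(struct net_device *soft_iface,
                                  struct hlist_node *node)
    {
            struct demo_priv *priv = netdev_priv(soft_iface);
            unsigned long flags;

            /* lock and list are now per device, not per module */
            spin_lock_irqsave(&priv->queue_lock, flags);
            hlist_add_head(node, &priv->queue);
            spin_unlock_irqrestore(&priv->queue_lock, flags);
    }

For this to work the net_device must have been allocated with enough private space (e.g. alloc_netdev(sizeof(struct demo_priv), ...)), which batman-adv does when softif_create() registers the soft interface.
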
......@@ -102,10 +102,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
struct batman_if *if_incoming,
int own_packet)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
unsigned long flags;
unsigned char *skb_buff;
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
/* own packet should always be scheduled */
if (!own_packet) {
......@@ -150,9 +150,9 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */
spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
......@@ -198,11 +198,11 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
unsigned long flags;
/* find position for the packet in the forward queue */
spin_lock_irqsave(&forw_bat_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
list) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
if (can_aggregate_with(batman_packet,
packet_len,
send_time,
......@@ -219,7 +219,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
* suitable aggregation packet found */
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* if we could not aggregate this packet with one of the others
......@@ -237,7 +237,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
aggregate(forw_packet_aggr,
packet_buff, packet_len,
direct_link);
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
}
......@@ -411,7 +411,7 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
return sprintf(buff, "%s\n",
batman_if->if_status == IF_NOT_IN_USE ?
"none" : "bat0");
"none" : batman_if->soft_iface->name);
}
static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
......@@ -77,13 +77,15 @@ static int is_valid_iface(struct net_device *net_dev)
return 1;
}
static struct batman_if *get_active_batman_if(void)
static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
struct batman_if *batman_if;
/* TODO: should check interfaces belonging to bat_priv */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->soft_iface != soft_iface)
continue;
if (batman_if->if_status == IF_ACTIVE)
goto out;
}
......@@ -99,23 +101,29 @@ static void set_primary_if(struct bat_priv *bat_priv,
struct batman_if *batman_if)
{
struct batman_packet *batman_packet;
struct vis_packet *vis_packet;
bat_priv->primary_if = batman_if;
if (!bat_priv->primary_if)
return;
set_main_if_addr(batman_if->net_dev->dev_addr);
batman_packet = (struct batman_packet *)(batman_if->packet_buff);
batman_packet->flags = PRIMARIES_FIRST_HOP;
batman_packet->ttl = TTL;
vis_packet = (struct vis_packet *)
bat_priv->my_vis_info->skb_packet->data;
memcpy(vis_packet->vis_orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(vis_packet->sender_orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
/***
* hacky trick to make sure that we send the HNA information via
* our new primary interface
*/
atomic_set(&hna_local_changed, 1);
atomic_set(&bat_priv->hna_local_changed, 1);
}
static bool hardif_is_iface_up(struct batman_if *batman_if)
......@@ -217,9 +225,6 @@ static void hardif_activate_interface(struct batman_if *batman_if)
bat_info(batman_if->soft_iface, "Interface activated: %s\n",
batman_if->dev);
if (atomic_read(&module_state) == MODULE_INACTIVE)
activate_module();
update_min_mtu(batman_if->soft_iface);
return;
}
......@@ -347,11 +352,16 @@ void hardif_disable_interface(struct batman_if *batman_if)
orig_hash_del_if(batman_if, bat_priv->num_ifaces);
if (batman_if == bat_priv->primary_if)
set_primary_if(bat_priv, get_active_batman_if());
set_primary_if(bat_priv,
get_active_batman_if(batman_if->soft_iface));
kfree(batman_if->packet_buff);
batman_if->packet_buff = NULL;
batman_if->if_status = IF_NOT_IN_USE;
/* delete all references to this batman_if */
purge_orig_ref(bat_priv);
purge_outstanding_packets(bat_priv, batman_if);
dev_put(batman_if->soft_iface);
/* nobody uses this interface anymore */
......@@ -359,10 +369,6 @@ void hardif_disable_interface(struct batman_if *batman_if)
softif_destroy(batman_if->soft_iface);
batman_if->soft_iface = NULL;
/*if ((atomic_read(&module_state) == MODULE_ACTIVE) &&
(bat_priv->num_ifaces == 0))
deactivate_module();*/
}
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
......@@ -415,10 +421,6 @@ static void hardif_free_interface(struct rcu_head *rcu)
{
struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
/* delete all references to this batman_if */
purge_orig(NULL);
purge_outstanding_packets(batman_if);
kfree(batman_if->dev);
kfree(batman_if);
}
......@@ -512,9 +514,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto err_out;
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto err_free;
/* packet should hold at least type and version */
if (unlikely(!pskb_may_pull(skb, 2)))
goto err_free;
......@@ -524,12 +523,19 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
|| !skb_mac_header(skb)))
goto err_free;
if (!batman_if->soft_iface)
goto err_free;
bat_priv = netdev_priv(batman_if->soft_iface);
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto err_free;
/* discard frames on not active interfaces */
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
batman_packet = (struct batman_packet *)skb->data;
bat_priv = netdev_priv(batman_if->soft_iface);
if (batman_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
......@@ -218,11 +218,12 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
if (atomic_read(&module_state) != MODULE_ACTIVE)
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dst_unreach;
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = (struct orig_node *)hash_find(orig_hash, icmp_packet->dst);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
icmp_packet->dst));
if (!orig_node)
goto unlock;
......@@ -233,7 +234,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (!batman_if)
goto dst_unreach;
......@@ -253,7 +254,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
goto out;
unlock:
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
dst_unreach:
icmp_packet->msg_type = DESTINATION_UNREACHABLE;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
......@@ -34,28 +34,14 @@
#include "hash.h"
struct list_head if_list;
struct hlist_head forw_bat_list;
struct hlist_head forw_bcast_list;
struct hashtable_t *orig_hash;
DEFINE_SPINLOCK(orig_hash_lock);
DEFINE_SPINLOCK(forw_bat_list_lock);
DEFINE_SPINLOCK(forw_bcast_list_lock);
int16_t num_hna;
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
atomic_t module_state;
struct workqueue_struct *bat_event_workqueue;
static int __init batman_init(void)
{
INIT_LIST_HEAD(&if_list);
INIT_HLIST_HEAD(&forw_bat_list);
INIT_HLIST_HEAD(&forw_bcast_list);
atomic_set(&module_state, MODULE_INACTIVE);
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
......@@ -78,64 +64,78 @@ static int __init batman_init(void)
static void __exit batman_exit(void)
{
deactivate_module();
debugfs_destroy();
unregister_netdevice_notifier(&hard_if_notifier);
hardif_remove_interfaces();
flush_workqueue(bat_event_workqueue);
destroy_workqueue(bat_event_workqueue);
bat_event_workqueue = NULL;
}
/* activates the module, starts timer ... */
void activate_module(void)
int mesh_init(struct net_device *soft_iface)
{
if (originator_init() < 1)
struct bat_priv *bat_priv = netdev_priv(soft_iface);
spin_lock_init(&bat_priv->orig_hash_lock);
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
spin_lock_init(&bat_priv->hna_lhash_lock);
spin_lock_init(&bat_priv->hna_ghash_lock);
spin_lock_init(&bat_priv->gw_list_lock);
spin_lock_init(&bat_priv->vis_hash_lock);
spin_lock_init(&bat_priv->vis_list_lock);
INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
INIT_HLIST_HEAD(&bat_priv->gw_list);
if (originator_init(bat_priv) < 1)
goto err;
if (hna_local_init() < 1)
if (hna_local_init(bat_priv) < 1)
goto err;
if (hna_global_init() < 1)
if (hna_global_init(bat_priv) < 1)
goto err;
/*hna_local_add(soft_device->dev_addr);*/
hna_local_add(soft_iface, soft_iface->dev_addr);
if (vis_init() < 1)
if (vis_init(bat_priv) < 1)
goto err;
/*update_min_mtu();*/
atomic_set(&module_state, MODULE_ACTIVE);
atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
err:
pr_err("Unable to allocate memory for mesh information structures: "
"out of mem ?\n");
deactivate_module();
mesh_free(soft_iface);
return -1;
end:
return;
return 0;
}
/* shuts down the whole module.*/
void deactivate_module(void)
void mesh_free(struct net_device *soft_iface)
{
atomic_set(&module_state, MODULE_DEACTIVATING);
struct bat_priv *bat_priv = netdev_priv(soft_iface);
purge_outstanding_packets(NULL);
flush_workqueue(bat_event_workqueue);
atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
purge_outstanding_packets(bat_priv, NULL);
vis_quit();
vis_quit(bat_priv);
originator_free();
originator_free(bat_priv);
hna_local_free();
hna_global_free();
hna_local_free(bat_priv);
hna_global_free(bat_priv);
synchronize_net();
synchronize_rcu();
atomic_set(&module_state, MODULE_INACTIVE);
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
void inc_module_count(void)
......@@ -76,9 +76,9 @@
#define EXPECTED_SEQNO_RANGE 65536
/* don't reset again within 30 seconds */
#define MODULE_INACTIVE 0
#define MODULE_ACTIVE 1
#define MODULE_DEACTIVATING 2
#define MESH_INACTIVE 0
#define MESH_ACTIVE 1
#define MESH_DEACTIVATING 2
#define BCAST_QUEUE_LEN 256
#define BATMAN_QUEUE_LEN 256
......@@ -128,22 +128,12 @@
#endif
extern struct list_head if_list;
extern struct hlist_head forw_bat_list;
extern struct hlist_head forw_bcast_list;
extern struct hashtable_t *orig_hash;
extern spinlock_t orig_hash_lock;
extern spinlock_t forw_bat_list_lock;
extern spinlock_t forw_bcast_list_lock;
extern int16_t num_hna;
extern unsigned char broadcast_addr[];
extern atomic_t module_state;
extern struct workqueue_struct *bat_event_workqueue;
void activate_module(void);
void deactivate_module(void);
int mesh_init(struct net_device *soft_iface);
void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int addr_to_string(char *buff, uint8_t *addr);
......@@ -154,7 +144,7 @@ int is_bcast(uint8_t *addr);
int is_mcast(uint8_t *addr);
#ifdef CONFIG_BATMAN_ADV_DEBUG
extern int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
#define bat_dbg(type, bat_priv, fmt, arg...) \
do { \
......@@ -29,31 +29,32 @@
#include "hard-interface.h"
#include "unicast.h"
static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
static void purge_orig(struct work_struct *work);
static void start_purge_timer(void)
static void start_purge_timer(struct bat_priv *bat_priv)
{
queue_delayed_work(bat_event_workqueue, &purge_orig_wq, 1 * HZ);
INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
int originator_init(void)
int originator_init(struct bat_priv *bat_priv)
{
unsigned long flags;
if (orig_hash)
if (bat_priv->orig_hash)
return 1;
spin_lock_irqsave(&orig_hash_lock, flags);
orig_hash = hash_new(128, compare_orig, choose_orig);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
if (!orig_hash)
if (!bat_priv->orig_hash)
goto err;
spin_unlock_irqrestore(&orig_hash_lock, flags);
start_purge_timer();
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
start_purge_timer(bat_priv);
return 1;
err:
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
}
......@@ -104,19 +105,19 @@ static void free_orig_node(void *data, void *arg)
kfree(orig_node);
}
void originator_free(void)
void originator_free(struct bat_priv *bat_priv)
{
unsigned long flags;
if (!orig_hash)
if (!bat_priv->orig_hash)
return;
cancel_delayed_work_sync(&purge_orig_wq);
cancel_delayed_work_sync(&bat_priv->orig_work);
spin_lock_irqsave(&orig_hash_lock, flags);
/*hash_delete(orig_hash, free_orig_node, bat_priv);*/
orig_hash = NULL;
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
bat_priv->orig_hash = NULL;
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
/* this function finds or creates an originator entry for the given
......@@ -127,9 +128,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
struct hashtable_t *swaphash;
int size;
orig_node = ((struct orig_node *)hash_find(orig_hash, addr));
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
if (orig_node != NULL)
if (orig_node)
return orig_node;
bat_dbg(DBG_BATMAN, bat_priv,
......@@ -164,17 +165,18 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
if (hash_add(orig_hash, orig_node) < 0)
if (hash_add(bat_priv->orig_hash, orig_node) < 0)
goto free_bcast_own_sum;
if (orig_hash->elements * 4 > orig_hash->size) {
swaphash = hash_resize(orig_hash, orig_hash->size * 2);
if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
swaphash = hash_resize(bat_priv->orig_hash,
bat_priv->orig_hash->size * 2);
if (swaphash == NULL)
if (!swaphash)
bat_dbg(DBG_BATMAN, bat_priv,
"Couldn't resize orig hash table\n");
else
orig_hash = swaphash;
bat_priv->orig_hash = swaphash;
}
return orig_node;
......@@ -203,8 +205,8 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
if ((time_after(jiffies,
neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
(neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)) {
(neigh_node->if_incoming->if_status == IF_INACTIVE) ||
(neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
if (neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)
......@@ -260,34 +262,46 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
return false;
}
void purge_orig(struct work_struct *work)
static void _purge_orig(struct bat_priv *bat_priv)
{
HASHIT(hashit);
struct orig_node *orig_node;
unsigned long flags;
spin_lock_irqsave(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
/* for all origins... */
while (hash_iterate(orig_hash, &hashit)) {
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
/*if (purge_orig_node(bat_priv, orig_node)) {
hash_remove_bucket(orig_hash, &hashit);
free_orig_node(orig_node);
}*/
if (purge_orig_node(bat_priv, orig_node)) {
hash_remove_bucket(bat_priv->orig_hash, &hashit);
free_orig_node(orig_node, bat_priv);
}
if (time_after(jiffies, (orig_node->last_frag_packet +
msecs_to_jiffies(FRAG_TIMEOUT))))
frag_list_free(&orig_node->frag_list);
}
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
static void purge_orig(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, orig_work);
_purge_orig(bat_priv);
start_purge_timer(bat_priv);
}
/* if work == NULL we were not called by the timer
* and thus do not need to re-arm the timer */
if (work)
start_purge_timer();
void purge_orig_ref(struct bat_priv *bat_priv)
{
_purge_orig(bat_priv);
}
int orig_seq_print_text(struct seq_file *seq, void *offset)
......@@ -325,9 +339,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
"outgoingIF", "Potential nexthops");
rcu_read_unlock();
spin_lock_irqsave(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
......@@ -359,7 +373,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
batman_count++;
}
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if ((batman_count == 0))
seq_printf(seq, "No batman nodes in range ...\n");
......@@ -399,26 +413,27 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct orig_node *orig_node;
unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock_irqsave(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
if (orig_node_add_if(orig_node, max_if_num) == -1)
goto err;
}
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
......@@ -476,6 +491,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
unsigned long flags;
......@@ -484,9 +500,9 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock_irqsave(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
ret = orig_node_del_if(orig_node, max_if_num,
......@@ -505,16 +521,19 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
if (batman_if == batman_if_tmp)
continue;
if (batman_if->soft_iface != batman_if_tmp->soft_iface)
continue;
if (batman_if_tmp->if_num > batman_if->if_num)
batman_if_tmp->if_num--;
}
rcu_read_unlock();
batman_if->if_num = -1;
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
......@@ -22,9 +22,9 @@
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
#define _NET_BATMAN_ADV_ORIGINATOR_H_
int originator_init(void);
void originator_free(void);
void purge_orig(struct work_struct *work);
int originator_init(struct bat_priv *bat_priv);
void originator_free(struct bat_priv *bat_priv);
void purge_orig_ref(struct bat_priv *bat_priv);
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
......@@ -37,8 +37,7 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bat_packet(struct sk_buff *skb,
struct batman_if *batman_if);
int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct orig_node *orig_node,
struct batman_if *recv_if);
void update_bonding_candidates(struct bat_priv *bat_priv,
......@@ -160,8 +160,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
static void send_packet(struct forw_packet *forw_packet)
{
struct batman_if *batman_if;
struct bat_priv *bat_priv =
netdev_priv(forw_packet->if_incoming->soft_iface);
struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct batman_packet *batman_packet =
(struct batman_packet *)(forw_packet->skb->data);
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
......@@ -199,18 +199,24 @@ static void send_packet(struct forw_packet *forw_packet)
/* broadcast on every interface */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list)
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->soft_iface != soft_iface)
continue;
send_packet_to_if(forw_packet, batman_if);
}
rcu_read_unlock();
}
static void rebuild_batman_packet(struct batman_if *batman_if)
static void rebuild_batman_packet(struct bat_priv *bat_priv,
struct batman_if *batman_if)
{
int new_len;
unsigned char *new_buff;
struct batman_packet *batman_packet;
new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
new_len = sizeof(struct batman_packet) +
(bat_priv->num_local_hna * ETH_ALEN);
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
......@@ -219,9 +225,9 @@ static void rebuild_batman_packet(struct batman_if *batman_if)
sizeof(struct batman_packet));
batman_packet = (struct batman_packet *)new_buff;
batman_packet->num_hna = hna_local_fill_buffer(
new_buff + sizeof(struct batman_packet),
new_len - sizeof(struct batman_packet));
batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
new_buff + sizeof(struct batman_packet),
new_len - sizeof(struct batman_packet));
kfree(batman_if->packet_buff);
batman_if->packet_buff = new_buff;
......@@ -253,9 +259,9 @@ void schedule_own_packet(struct batman_if *batman_if)
batman_if->if_status = IF_ACTIVE;
/* if local hna has changed and interface is a primary interface */
if ((atomic_read(&hna_local_changed)) &&
if ((atomic_read(&bat_priv->hna_local_changed)) &&
(batman_if == bat_priv->primary_if))
rebuild_batman_packet(batman_if);
rebuild_batman_packet(bat_priv, batman_if);
/**
* NOTE: packet_buff might just have been re-allocated in
......@@ -351,16 +357,17 @@ static void forw_packet_free(struct forw_packet *forw_packet)
kfree(forw_packet);
}
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
struct forw_packet *forw_packet,
unsigned long send_time)
{
unsigned long flags;
INIT_HLIST_NODE(&forw_packet->list);
/* add new packet to packet list */
spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_add_head(&forw_packet->list, &forw_bcast_list);
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
......@@ -388,6 +395,9 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
goto out;
}
if (!bat_priv->primary_if)
goto out;
forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet)
......@@ -409,7 +419,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
_add_bcast_packet_to_list(forw_packet, 1);
_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
return NETDEV_TX_OK;
packet_free:
......@@ -429,23 +439,26 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
struct sk_buff *skb1;
struct bat_priv *bat_priv;
struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
struct bat_priv *bat_priv = netdev_priv(soft_iface);
spin_lock_irqsave(&forw_bcast_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
if (atomic_read(&module_state) == MODULE_DEACTIVATING)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->soft_iface != soft_iface)
continue;
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
send_skb_packet(skb1,
batman_if, broadcast_addr);
send_skb_packet(skb1, batman_if, broadcast_addr);
}
rcu_read_unlock();
......@@ -453,12 +466,12 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
_add_bcast_packet_to_list(bat_priv, forw_packet,
((5 * HZ) / 1000));
return;
}
out:
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
forw_packet_free(forw_packet);
atomic_inc(&bat_priv->bcast_queue_left);
}
......@@ -472,11 +485,12 @@ void send_outstanding_bat_packet(struct work_struct *work)
unsigned long flags;
struct bat_priv *bat_priv;
spin_lock_irqsave(&forw_bat_list_lock, flags);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
if (atomic_read(&module_state) == MODULE_DEACTIVATING)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
send_packet(forw_packet);
......@@ -490,8 +504,6 @@ void send_outstanding_bat_packet(struct work_struct *work)
schedule_own_packet(forw_packet->if_incoming);
out:
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
/* don't count own packet */
if (!forw_packet->own)
atomic_inc(&bat_priv->batman_queue_left);
......@@ -499,29 +511,25 @@ void send_outstanding_bat_packet(struct work_struct *work)
forw_packet_free(forw_packet);
}
void purge_outstanding_packets(struct batman_if *batman_if)
void purge_outstanding_packets(struct bat_priv *bat_priv,
struct batman_if *batman_if)
{
struct bat_priv *bat_priv;
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
unsigned long flags;
if (batman_if->soft_iface) {
bat_priv = netdev_priv(batman_if->soft_iface);
if (batman_if)
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets(): %s\n",
batman_if->dev);
else
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets()\n");
}
if (batman_if)
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets(): %s\n",
batman_if->dev);
else
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets()\n");
/* free bcast list */
spin_lock_irqsave(&forw_bcast_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bcast_list, list) {
&bat_priv->forw_bcast_list, list) {
/**
* if purge_outstanding_packets() was called with an argument
......@@ -531,21 +539,21 @@ void purge_outstanding_packets(struct batman_if *batman_if)
(forw_packet->if_incoming != batman_if))
continue;
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/**
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
spin_lock_irqsave(&forw_bcast_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
}
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* free batman packet list */
spin_lock_irqsave(&forw_bat_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bat_list, list) {
&bat_priv->forw_bat_list, list) {
/**
* if purge_outstanding_packets() was called with an argument
......@@ -555,14 +563,14 @@ void purge_outstanding_packets(struct batman_if *batman_if)
(forw_packet->if_incoming != batman_if))
continue;
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
spin_lock_irqsave(&forw_bat_list_lock, flags);
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
}
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
......@@ -35,6 +35,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
struct batman_if *if_outgoing);
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
void send_outstanding_bat_packet(struct work_struct *work);
void purge_outstanding_packets(struct batman_if *batman_if);
void purge_outstanding_packets(struct bat_priv *bat_priv,
struct batman_if *batman_if);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
......@@ -35,10 +35,7 @@
#include <linux/etherdevice.h>
#include "unicast.h"
static uint32_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
* broadcast storms */
unsigned char main_if_addr[ETH_ALEN];
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void bat_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info);
......@@ -58,11 +55,6 @@ static const struct ethtool_ops bat_ethtool_ops = {
.set_rx_csum = bat_set_rx_csum
};
void set_main_if_addr(uint8_t *addr)
{
memcpy(main_if_addr, addr, ETH_ALEN);
}
int my_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
......@@ -76,7 +68,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
* to write freely in that area.
*/
result = skb_cow_head(skb, len);
if (result < 0)
return result;
......@@ -111,7 +102,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
/* only modify hna-table if it has been initialised before */
if (atomic_read(&module_state) == MODULE_ACTIVE) {
if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
hna_local_remove(bat_priv, dev->dev_addr,
"mac address changed");
hna_local_add(dev, addr->sa_data);
......@@ -140,7 +131,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct bcast_packet *bcast_packet;
int data_len = skb->len, ret;
if (atomic_read(&module_state) != MODULE_ACTIVE)
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
soft_iface->trans_start = jiffies;
......@@ -150,6 +141,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* ethernet packet should be broadcasted */
if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
if (!bat_priv->primary_if)
goto dropped;
if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
goto dropped;
......@@ -163,14 +156,14 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
memcpy(bcast_packet->orig, main_if_addr, ETH_ALEN);
memcpy(bcast_packet->orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */
bcast_packet->seqno = htonl(bcast_seqno);
bcast_packet->seqno =
htonl(atomic_inc_return(&bat_priv->bcast_seqno));
/* broadcast packet. on success, increase seqno. */
if (add_bcast_packet_to_list(bat_priv, skb) == NETDEV_TX_OK)
bcast_seqno++;
add_bcast_packet_to_list(bat_priv, skb);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
......@@ -179,10 +172,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* unicast packet */
} else {
ret = unicast_send_skb(skb, bat_priv);
if (ret != 0) {
bat_priv->stats.tx_dropped++;
goto end;
}
if (ret != 0)
goto dropped_freed;
}
bat_priv->stats.tx_packets++;
......@@ -190,8 +181,9 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
goto end;
dropped:
bat_priv->stats.tx_dropped++;
kfree_skb(skb);
dropped_freed:
bat_priv->stats.tx_dropped++;
end:
return NETDEV_TX_OK;
}
......@@ -292,7 +284,6 @@ struct net_device *softif_create(char *name)
}
ret = register_netdev(soft_iface);
if (ret < 0) {
pr_err("Unable to register the batman interface '%s': %i\n",
name, ret);
......@@ -310,21 +301,29 @@ struct net_device *softif_create(char *name)
atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->hna_local_changed, 0);
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
ret = sysfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_soft_iface;
ret = debugfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_sysfs;
ret = mesh_init(soft_iface);
if (ret < 0)
goto unreg_debugfs;
return soft_iface;
unreg_debugfs:
debugfs_del_meshif(soft_iface);
unreg_sysfs:
sysfs_del_meshif(soft_iface);
unreg_soft_iface:
......@@ -341,6 +340,7 @@ void softif_destroy(struct net_device *soft_iface)
{
debugfs_del_meshif(soft_iface);
sysfs_del_meshif(soft_iface);
mesh_free(soft_iface);
unregister_netdevice(soft_iface);
}
......@@ -22,7 +22,6 @@
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
void set_main_if_addr(uint8_t *addr);
int my_skb_head_push(struct sk_buff *skb, unsigned int len);
int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
void interface_rx(struct net_device *soft_iface,
......@@ -30,6 +29,4 @@ void interface_rx(struct net_device *soft_iface,
struct net_device *softif_create(char *name);
void softif_destroy(struct net_device *soft_iface);
extern unsigned char main_if_addr[];
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
......@@ -24,25 +24,22 @@
#include "types.h"
int hna_local_init(void);
int hna_local_init(struct bat_priv *bat_priv);
void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
void hna_local_remove(struct bat_priv *bat_priv,
uint8_t *addr, char *message);
int hna_local_fill_buffer(unsigned char *buff, int buff_len);
int hna_local_fill_buffer(struct bat_priv *bat_priv,
unsigned char *buff, int buff_len);
int hna_local_seq_print_text(struct seq_file *seq, void *offset);
void hna_local_free(void);
int hna_global_init(void);
void hna_local_free(struct bat_priv *bat_priv);
int hna_global_init(struct bat_priv *bat_priv);
void hna_global_add_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node,
unsigned char *hna_buff, int hna_buff_len);
int hna_global_seq_print_text(struct seq_file *seq, void *offset);
void hna_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, char *message);
void hna_global_free(void);
struct orig_node *transtable_search(uint8_t *addr);
extern spinlock_t hna_local_hash_lock;
extern struct hashtable_t *hna_local_hash;
extern atomic_t hna_local_changed;
void hna_global_free(struct bat_priv *bat_priv);
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
......@@ -51,18 +51,18 @@ struct batman_if {
};
/**
* orig_node - structure for orig_list maintaining nodes of mesh
* @primary_addr: hosts primary interface address
* @last_valid: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
* @flags: for now only VIS_SERVER flag
* @last_real_seqno: last and best known squence number
* @last_ttl: ttl of last received packet
* @last_bcast_seqno: last broadcast sequence number received by this host
*
* @candidates: how many candidates are available
* @selected: next bonding candidate
* orig_node - structure for orig_list maintaining nodes of mesh
* @primary_addr: hosts primary interface address
* @last_valid: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
* @flags: for now only VIS_SERVER flag
* @last_real_seqno: last and best known squence number
* @last_ttl: ttl of last received packet
* @last_bcast_seqno: last broadcast sequence number received by this host
*
* @candidates: how many candidates are available
* @selected: next bonding candidate
*/
struct orig_node {
uint8_t orig[ETH_ALEN];
......@@ -92,8 +92,8 @@ struct orig_node {
};
/**
* neigh_node
* @last_valid: when last packet via this neighbor was received
* neigh_node
* @last_valid: when last packet via this neighbor was received
*/
struct neigh_node {
struct list_head list;
......@@ -111,6 +111,7 @@ struct neigh_node {
};
struct bat_priv {
atomic_t mesh_state;
struct net_device_stats stats;
atomic_t aggregation_enabled;
atomic_t bonding_enabled;
......@@ -118,6 +119,7 @@ struct bat_priv {
atomic_t vis_mode;
atomic_t orig_interval;
atomic_t log_level;
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
char num_ifaces;
......@@ -125,6 +127,29 @@ struct bat_priv {
struct batman_if *primary_if;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
struct hlist_head forw_bcast_list;
struct hlist_head gw_list;
struct list_head vis_send_list;
struct hashtable_t *orig_hash;
struct hashtable_t *hna_local_hash;
struct hashtable_t *hna_global_hash;
struct hashtable_t *vis_hash;
spinlock_t orig_hash_lock;
spinlock_t forw_bat_list_lock;
spinlock_t forw_bcast_list_lock;
spinlock_t hna_lhash_lock;
spinlock_t hna_ghash_lock;
spinlock_t gw_list_lock;
spinlock_t vis_hash_lock;
spinlock_t vis_list_lock;
int16_t num_local_hna;
atomic_t hna_local_changed;
struct delayed_work hna_work;
struct delayed_work orig_work;
struct delayed_work vis_work;
struct gw_node *curr_gw;
struct vis_info *my_vis_info;
};
struct socket_client {
......@@ -154,8 +179,8 @@ struct hna_global_entry {
};
/**
* forw_packet - structure for forw_list maintaining packets to be
* send/forwarded
* forw_packet - structure for forw_list maintaining packets to be
* send/forwarded
*/
struct forw_packet {
struct hlist_node list;
......@@ -193,4 +218,28 @@ struct frag_packet_list_entry {
struct sk_buff *skb;
};
struct vis_info {
unsigned long first_seen;
struct list_head recv_list;
/* list of server-neighbors we received a vis-packet
* from. we should not reply to them. */
struct list_head send_list;
struct kref refcount;
struct bat_priv *bat_priv;
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
/* vis_info may follow here*/
} __attribute__((packed));
struct vis_info_entry {
uint8_t src[ETH_ALEN];
uint8_t dest[ETH_ALEN];
uint8_t quality; /* quality = 0 means HNA */
} __attribute__((packed));
struct recvlist_node {
struct list_head list;
uint8_t mac[ETH_ALEN];
};
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
......@@ -34,7 +34,7 @@ struct sk_buff *merge_frag_packet(struct list_head *head,
struct sk_buff *skb)
{
struct unicast_frag_packet *up =
(struct unicast_frag_packet *) skb->data;
(struct unicast_frag_packet *)skb->data;
struct sk_buff *tmp_skb;
/* set skb to the first part and tmp_skb to the second part */
......@@ -66,7 +66,7 @@ void create_frag_entry(struct list_head *head, struct sk_buff *skb)
{
struct frag_packet_list_entry *tfp;
struct unicast_frag_packet *up =
(struct unicast_frag_packet *) skb->data;
(struct unicast_frag_packet *)skb->data;
/* free and oldest packets stand at the end */
tfp = list_entry((head)->prev, typeof(*tfp), list);
......@@ -115,7 +115,7 @@ struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
if (tfp->seqno == ntohs(up->seqno))
goto mov_tail;
tmp_up = (struct unicast_frag_packet *) tfp->skb->data;
tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
......@@ -210,14 +210,15 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
uint8_t dstaddr[6];
unsigned long flags;
spin_lock_irqsave(&orig_hash_lock, flags);
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
/* get routing information */
orig_node = ((struct orig_node *)hash_find(orig_hash, ethhdr->h_dest));
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
ethhdr->h_dest));
/* check for hna host */
if (!orig_node)
orig_node = transtable_search(ethhdr->h_dest);
orig_node = transtable_search(bat_priv, ethhdr->h_dest);
router = find_router(orig_node, NULL);
......@@ -230,7 +231,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (batman_if->if_status != IF_ACTIVE)
goto dropped;
......@@ -257,7 +258,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
return 0;
unlock:
spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
dropped:
kfree_skb(skb);
return 1;
......@@ -24,29 +24,6 @@
#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
struct vis_info {
unsigned long first_seen;
struct list_head recv_list;
/* list of server-neighbors we received a vis-packet
* from. we should not reply to them. */
struct list_head send_list;
struct kref refcount;
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
/* vis_info may follow here*/
} __attribute__((packed));
struct vis_info_entry {
uint8_t src[ETH_ALEN];
uint8_t dest[ETH_ALEN];
uint8_t quality; /* quality = 0 means HNA */
} __attribute__((packed));
struct recvlist_node {
struct list_head list;
uint8_t mac[ETH_ALEN];
};
int vis_seq_print_text(struct seq_file *seq, void *offset);
void receive_server_sync_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
......@@ -54,7 +31,7 @@ void receive_server_sync_packet(struct bat_priv *bat_priv,
void receive_client_update_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len);
int vis_init(void);
void vis_quit(void);
int vis_init(struct bat_priv *bat_priv);
void vis_quit(struct bat_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_VIS_H_ */