Commit 7d02674f authored by Simon Wunderlich, committed by Greg Kroah-Hartman

Staging: batman-adv: move queue counters into bat_priv

To support multiple mesh devices later, we need to move global variables
such as the queues into the corresponding private struct bat_priv of the
soft devices.

Note that this patch still has a lot of FIXMEs and depends on the global
soft_device variable. This should be resolved later, e.g. by referencing
the parent soft device in batman_if.
Signed-off-by: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
[sven.eckelmann@gmx.de: Rework on top of current version]
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 8bb22a38
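
The patch boils down to a counting-semaphore pattern, now kept per device instead of globally: before a packet is queued, a slot is reserved by decrementing the counter only if it is still non-zero (atomic_dec_not_zero); when the packet leaves the queue, or an allocation fails after the reservation, the slot is returned with atomic_inc. Below is a minimal userspace sketch of that pattern, using C11 <stdatomic.h> in place of the kernel's atomic_t. The struct and constant names mirror the patch, the queue-length values are illustrative, and dec_not_zero() only models the driver's atomic_dec_not_zero() helper, not its actual implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* illustrative values; the real constants live in the driver headers */
#define BCAST_QUEUE_LEN  256
#define BATMAN_QUEUE_LEN 256

/* per-device private data, as in struct bat_priv after this patch */
struct bat_priv {
	atomic_int bcast_queue_left;
	atomic_int batman_queue_left;
};

/* models the driver's atomic_dec_not_zero(): reserve a slot unless 0 */
static bool dec_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0)
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;	/* slot reserved */
	return false;			/* queue full */
}

static bool queue_bcast_packet(struct bat_priv *bat_priv)
{
	if (!dec_not_zero(&bat_priv->bcast_queue_left)) {
		fprintf(stderr, "bcast packet queue full\n");
		return false;
	}
	/* build and schedule the forw_packet here; if an allocation
	 * fails after this point, the slot must be handed back */
	return true;
}

static void bcast_packet_sent(struct bat_priv *bat_priv)
{
	/* packet left the queue: return its slot */
	atomic_fetch_add(&bat_priv->bcast_queue_left, 1);
}

int main(void)
{
	struct bat_priv bat_priv;

	/* per-device initialisation, as sysfs_add_meshif() now does */
	atomic_init(&bat_priv.bcast_queue_left, BCAST_QUEUE_LEN);
	atomic_init(&bat_priv.batman_queue_left, BATMAN_QUEUE_LEN);

	if (queue_bcast_packet(&bat_priv))
		bcast_packet_sent(&bat_priv);
	return 0;
}

Because the counters now live in bat_priv, each soft device gets its own queue budget instead of all devices sharing one global pool.
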
@@ -111,7 +111,7 @@ static void new_aggregated_packet(unsigned char *packet_buff,

 	/* own packet should always be scheduled */
 	if (!own_packet) {
-		if (!atomic_dec_not_zero(&batman_queue_left)) {
+		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"batman packet queue full\n");
 			return;
@@ -121,7 +121,7 @@ static void new_aggregated_packet(unsigned char *packet_buff,
 	forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
 	if (!forw_packet_aggr) {
 		if (!own_packet)
-			atomic_inc(&batman_queue_left);
+			atomic_inc(&bat_priv->batman_queue_left);
 		return;
 	}

@@ -129,7 +129,7 @@ static void new_aggregated_packet(unsigned char *packet_buff,
 			   GFP_ATOMIC);
 	if (!forw_packet_aggr->packet_buff) {
 		if (!own_packet)
-			atomic_inc(&batman_queue_left);
+			atomic_inc(&bat_priv->batman_queue_left);
 		kfree(forw_packet_aggr);
 		return;
 	}
...
@@ -311,6 +311,8 @@ int sysfs_add_meshif(struct net_device *dev)
 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->orig_interval, 1000);
 	atomic_set(&bat_priv->log_level, 0);
+	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
+	atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
...
@@ -42,9 +42,6 @@ DEFINE_SPINLOCK(orig_hash_lock);
 DEFINE_SPINLOCK(forw_bat_list_lock);
 DEFINE_SPINLOCK(forw_bcast_list_lock);

-atomic_t bcast_queue_left;
-atomic_t batman_queue_left;
-
 int16_t num_hna;

 struct net_device *soft_device;
@@ -69,9 +66,6 @@ static int __init batman_init(void)
 	atomic_set(&module_state, MODULE_INACTIVE);

-	atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
-	atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
-
 	/* the name should not be longer than 10 chars - see
 	 * http://lwn.net/Articles/23634/ */
 	bat_event_workqueue = create_singlethread_workqueue("bat_events");
...
@@ -136,8 +136,6 @@ extern spinlock_t orig_hash_lock;
 extern spinlock_t forw_bat_list_lock;
 extern spinlock_t forw_bcast_list_lock;

-extern atomic_t bcast_queue_left;
-extern atomic_t batman_queue_left;
 extern int16_t num_hna;

 extern struct net_device *soft_device;
...
@@ -404,7 +404,7 @@ int add_bcast_packet_to_list(struct sk_buff *skb)
 	/* FIXME: each batman_if will be attached to a softif */
 	struct bat_priv *bat_priv = netdev_priv(soft_device);

-	if (!atomic_dec_not_zero(&bcast_queue_left)) {
+	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
 		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
 		goto out;
 	}
@@ -436,7 +436,7 @@ int add_bcast_packet_to_list(struct sk_buff *skb)
 packet_free:
 	kfree(forw_packet);
 out_and_inc:
-	atomic_inc(&bcast_queue_left);
+	atomic_inc(&bat_priv->bcast_queue_left);
 out:
 	return NETDEV_TX_BUSY;
 }
@@ -450,6 +450,8 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 		container_of(delayed_work, struct forw_packet, delayed_work);
 	unsigned long flags;
 	struct sk_buff *skb1;
+	/* FIXME: each batman_if will be attached to a softif */
+	struct bat_priv *bat_priv = netdev_priv(soft_device);

 	spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	hlist_del(&forw_packet->list);
@@ -479,7 +481,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)

 out:
 	forw_packet_free(forw_packet);
-	atomic_inc(&bcast_queue_left);
+	atomic_inc(&bat_priv->bcast_queue_left);
 }

 void send_outstanding_bat_packet(struct work_struct *work)
@@ -489,6 +491,8 @@ void send_outstanding_bat_packet(struct work_struct *work)
 	struct forw_packet *forw_packet =
 		container_of(delayed_work, struct forw_packet, delayed_work);
 	unsigned long flags;
+	/* FIXME: each batman_if will be attached to a softif */
+	struct bat_priv *bat_priv = netdev_priv(soft_device);

 	spin_lock_irqsave(&forw_bat_list_lock, flags);
 	hlist_del(&forw_packet->list);
@@ -510,7 +514,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
 out:
 	/* don't count own packet */
 	if (!forw_packet->own)
-		atomic_inc(&batman_queue_left);
+		atomic_inc(&bat_priv->batman_queue_left);

 	forw_packet_free(forw_packet);
 }
...
@@ -113,6 +113,8 @@ struct bat_priv {
 	atomic_t vis_mode;
 	atomic_t orig_interval;
 	atomic_t log_level;
+	atomic_t bcast_queue_left;
+	atomic_t batman_queue_left;
 	char num_ifaces;
 	struct debug_log *debug_log;
 	struct batman_if *primary_if;
...
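
The FIXME comments above all come from looking up bat_priv through the global soft_device. The commit message suggests resolving this later by referencing the parent soft device from batman_if; a hypothetical sketch of that direction follows (the soft_iface member and the helper are assumptions for illustration, not part of this patch):

/* hypothetical: give each batman_if a pointer to its parent soft device,
 * so bat_priv can be reached without the global soft_device variable */
struct batman_if {
	/* ... existing members ... */
	struct net_device *soft_iface;	/* assumed field name */
};

static inline struct bat_priv *bat_priv_of(struct batman_if *batman_if)
{
	return netdev_priv(batman_if->soft_iface);
}
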