commit b8cec4a4
Merge: 5e2b61f7 e44d8fe2
Author: David S. Miller
Date:   Mar 07, 2011

    Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge

Showing 24 changed files with 1634 additions and 1232 deletions (+1634, -1232):
 net/batman-adv/aggregation.c       |   +4    -4
 net/batman-adv/aggregation.h       |   +2    -2
 net/batman-adv/bat_sysfs.c         |  +26   -25
 net/batman-adv/gateway_client.c    |  +90   -50
 net/batman-adv/hard-interface.c    | +208  -199
 net/batman-adv/hard-interface.h    |   +7    -8
 net/batman-adv/hash.c              |  +17    -9
 net/batman-adv/hash.h              |  +45   -67
 net/batman-adv/icmp_socket.c       |  +22   -18
 net/batman-adv/main.c              |   +6    -7
 net/batman-adv/main.h              |  +11    -1
 net/batman-adv/originator.c        | +157   -95
 net/batman-adv/originator.h        |  +44    -6
 net/batman-adv/routing.c           | +566  -417
 net/batman-adv/routing.h           |  +13   -12
 net/batman-adv/send.c              |  +52   -51
 net/batman-adv/send.h              |   +4    -4
 net/batman-adv/soft-interface.c    |  +32   -42
 net/batman-adv/soft-interface.h    |   +2    -1
 net/batman-adv/translation-table.c | +140   -65
 net/batman-adv/types.h             |  +29   -19
 net/batman-adv/unicast.c           |  +46   -47
 net/batman-adv/unicast.h           |   +1    -1
 net/batman-adv/vis.c               | +110   -82
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 			       int packet_len,
 			       unsigned long send_time,
 			       bool directlink,
-			       struct batman_if *if_incoming,
+			       struct hard_iface *if_incoming,
 			       struct forw_packet *forw_packet)
 {
 	struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 				  unsigned long send_time, bool direct_link,
-				  struct batman_if *if_incoming,
+				  struct hard_iface *if_incoming,
 				  int own_packet)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time)
 {
 	/**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
 /* unpack the aggregated packets and process them one by one */
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming)
+			     int packet_len, struct hard_iface *if_incoming)
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time);
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming);
+			     int packet_len, struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
 			       char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-			 "none" : batman_if->soft_iface->name);
+	length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+			 "none" : hard_iface->soft_iface->name);
 
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 				char *buff, size_t count)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	int status_tmp = -1;
-	int ret;
+	int ret = count;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return count;
 
 	if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	if (strlen(buff) >= IFNAMSIZ) {
 		pr_err("Invalid parameter for 'mesh_iface' setting received: "
 		       "interface name too long '%s'\n", buff);
-		kref_put(&batman_if->refcount, hardif_free_ref);
+		hardif_free_ref(hard_iface);
 		return -EINVAL;
 	}
@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	else
 		status_tmp = IF_I_WANT_YOU;
 
-	if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-	    (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-		kref_put(&batman_if->refcount, hardif_free_ref);
-		return count;
-	}
+	if (hard_iface->if_status == status_tmp)
+		goto out;
+
+	if ((hard_iface->soft_iface) &&
+	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+		goto out;
 
 	if (status_tmp == IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
-		kref_put(&batman_if->refcount, hardif_free_ref);
-		return count;
+		goto out;
 	}
 
 	/* if the interface already is in use */
-	if (batman_if->if_status != IF_NOT_IN_USE) {
+	if (hard_iface->if_status != IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
 	}
 
-	ret = hardif_enable_interface(batman_if, buff);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	ret = hardif_enable_interface(hard_iface, buff);
 
+out:
+	hardif_free_ref(hard_iface);
 	return ret;
 }
@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 				 char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	switch (batman_if->if_status) {
+	switch (hard_iface->if_status) {
 	case IF_TO_BE_REMOVED:
 		length = sprintf(buff, "disabling\n");
 		break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 		break;
 	}
 
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
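Note on the store_mesh_iface() change above: the repeated kref_put()-then-return sequences are folded into a single "out:" label, so the interface reference is dropped exactly once on every path. A minimal, self-contained userspace sketch of that single-exit cleanup idiom (all names here are illustrative, not from the kernel tree):

	/* Single-exit cleanup: every early exit funnels through "out:",
	 * so the reference taken at the top is released exactly once. */
	#include <stdio.h>
	#include <stdlib.h>

	struct obj { int refcount; int status; };

	static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
	static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

	static int do_store(struct obj *shared, int new_status)
	{
		struct obj *o = obj_get(shared);
		int ret = 0;

		if (o->status == new_status)
			goto out;            /* nothing to do, still must release */

		o->status = new_status;
		ret = 1;
	out:
		obj_put(o);                  /* the one release point */
		return ret;
	}

	int main(void)
	{
		struct obj *o = calloc(1, sizeof(*o));
		if (!o)
			return 1;
		o->refcount = 1;
		printf("first:  changed=%d\n", do_store(o, 5));
		printf("second: changed=%d\n", do_store(o, 5)); /* no-op: same value */
		obj_put(o);
		return 0;
	}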
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -28,58 +28,75 @@
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_ref(struct kref *refcount)
+static void gw_node_free_rcu(struct rcu_head *rcu)
 {
 	struct gw_node *gw_node;
 
-	gw_node = container_of(refcount, struct gw_node, refcount);
+	gw_node = container_of(rcu, struct gw_node, rcu);
 	kfree(gw_node);
 }
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
+static void gw_node_free_ref(struct gw_node *gw_node)
 {
-	struct gw_node *gw_node;
-
-	gw_node = container_of(rcu, struct gw_node, rcu);
-	kref_put(&gw_node->refcount, gw_node_free_ref);
+	if (atomic_dec_and_test(&gw_node->refcount))
+		call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
 void *gw_get_selected(struct bat_priv *bat_priv)
 {
-	struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+	struct gw_node *curr_gateway_tmp;
+	struct orig_node *orig_node = NULL;
 
+	rcu_read_lock();
+	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
 	if (!curr_gateway_tmp)
-		return NULL;
+		goto out;
 
-	return curr_gateway_tmp->orig_node;
+	orig_node = curr_gateway_tmp->orig_node;
+	if (!orig_node)
+		goto out;
+
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
+
+out:
+	rcu_read_unlock();
+	return orig_node;
 }
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-	struct gw_node *gw_node = bat_priv->curr_gw;
+	struct gw_node *gw_node;
 
-	bat_priv->curr_gw = NULL;
+	spin_lock_bh(&bat_priv->gw_list_lock);
+	gw_node = rcu_dereference(bat_priv->curr_gw);
+	rcu_assign_pointer(bat_priv->curr_gw, NULL);
+	spin_unlock_bh(&bat_priv->gw_list_lock);
 
 	if (gw_node)
-		kref_put(&gw_node->refcount, gw_node_free_ref);
+		gw_node_free_ref(gw_node);
 }
 
-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-				 struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
-	struct gw_node *curr_gw_node = bat_priv->curr_gw;
+	struct gw_node *curr_gw_node;
 
-	if (new_gw_node)
-		kref_get(&new_gw_node->refcount);
+	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+		new_gw_node = NULL;
 
-	bat_priv->curr_gw = new_gw_node;
-	return curr_gw_node;
+	spin_lock_bh(&bat_priv->gw_list_lock);
+	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+	spin_unlock_bh(&bat_priv->gw_list_lock);
+
+	if (curr_gw_node)
+		gw_node_free_ref(curr_gw_node);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
 		return;
 
-	if (bat_priv->curr_gw)
+	rcu_read_lock();
+	curr_gw = rcu_dereference(bat_priv->curr_gw);
+	if (curr_gw) {
+		rcu_read_unlock();
 		return;
+	}
 
 	rcu_read_lock();
+
 	if (hlist_empty(&bat_priv->gw_list)) {
-		rcu_read_unlock();
 
-		if (bat_priv->curr_gw) {
+		if (curr_gw) {
+			rcu_read_unlock();
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
 				"no gateway in range\n");
 			gw_deselect(bat_priv);
-		}
+		} else
+			rcu_read_unlock();
 
 		return;
 	}
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
 			max_gw_factor = tmp_gw_factor;
 	}
 
-	if (bat_priv->curr_gw != curr_gw_tmp) {
-		if ((bat_priv->curr_gw) && (!curr_gw_tmp))
+	if (curr_gw != curr_gw_tmp) {
+		if ((curr_gw) && (!curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
 				"no gateway in range\n");
-		else if ((!bat_priv->curr_gw) && (curr_gw_tmp))
+		else if ((!curr_gw) && (curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Adding route to gateway %pM "
 				"(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
 				curr_gw_tmp->orig_node->gw_flags,
 				curr_gw_tmp->orig_node->router->tq_avg);
 
-		old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+		gw_select(bat_priv, curr_gw_tmp);
 	}
 
 	rcu_read_unlock();
-
-	/* the kfree() has to be outside of the rcu lock */
-	if (old_gw_node)
-		kref_put(&old_gw_node->refcount, gw_node_free_ref);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-	struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+	struct gw_node *curr_gateway_tmp;
 	uint8_t gw_tq_avg, orig_tq_avg;
 
+	rcu_read_lock();
+	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
 	if (!curr_gateway_tmp)
-		return;
+		goto out_rcu;
 
 	if (!curr_gateway_tmp->orig_node)
-		goto deselect;
+		goto deselect_rcu;
 
 	if (!curr_gateway_tmp->orig_node->router)
-		goto deselect;
+		goto deselect_rcu;
 
 	/* this node already is the gateway */
 	if (curr_gateway_tmp->orig_node == orig_node)
-		return;
+		goto out_rcu;
 
 	if (!orig_node->router)
-		return;
+		goto out_rcu;
 
 	gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
+	rcu_read_unlock();
+
 	orig_tq_avg = orig_node->router->tq_avg;
 
 	/* the TQ value has to be better */
 	if (orig_tq_avg < gw_tq_avg)
-		return;
+		goto out;
 
 	/**
 	 * if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 	 **/
 	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
 	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
-		return;
+		goto out;
 
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Restarting gateway selection: better gateway found (tq curr: "
 		"%i, tq new: %i)\n",
 		gw_tq_avg, orig_tq_avg);
+	goto deselect;
 
+out_rcu:
+	rcu_read_unlock();
+	goto out;
+deselect_rcu:
+	rcu_read_unlock();
 deselect:
 	gw_deselect(bat_priv);
+out:
+	return;
 }
 
 static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
 	memset(gw_node, 0, sizeof(struct gw_node));
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
-	kref_init(&gw_node->refcount);
+	atomic_set(&gw_node->refcount, 1);
 
 	spin_lock_bh(&bat_priv->gw_list_lock);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
 			"Gateway %pM removed from gateway list\n",
 			orig_node->orig);
 
-		if (gw_node == bat_priv->curr_gw) {
+		if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
 			rcu_read_unlock();
 			gw_deselect(bat_priv);
 			return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
 		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
 			continue;
 
-		if (bat_priv->curr_gw == gw_node)
+		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
 			gw_deselect(bat_priv);
 
 		hlist_del_rcu(&gw_node->list);
-		call_rcu(&gw_node->rcu, gw_node_free_rcu);
+		gw_node_free_ref(gw_node);
 	}
@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
 static int _write_buffer_text(struct bat_priv *bat_priv,
 			      struct seq_file *seq, struct gw_node *gw_node)
 {
-	int down, up;
+	struct gw_node *curr_gw;
+	int down, up, ret;
 
 	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-	return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-		       (bat_priv->curr_gw == gw_node ? "=>" : "  "),
+	rcu_read_lock();
+	curr_gw = rcu_dereference(bat_priv->curr_gw);
+
+	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+		       (curr_gw == gw_node ? "=>" : "  "),
 		       gw_node->orig_node->orig,
 		       gw_node->orig_node->router->tq_avg,
 		       gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
 		       (down > 2048 ? "MBit" : "KBit"),
 		       (up > 2048 ? up / 1024 : up),
 		       (up > 2048 ? "MBit" : "KBit"));
+	rcu_read_unlock();
+
+	return ret;
 }
 
 int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
 		return -1;
 
-	if (!bat_priv->curr_gw)
+	rcu_read_lock();
+	if (!rcu_dereference(bat_priv->curr_gw)) {
+		rcu_read_unlock();
 		return 0;
+	}
+	rcu_read_unlock();
 
 	return 1;
 }
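The gw_node changes above swap struct kref for a bare atomic_t: lookups take their reference with atomic_inc_not_zero() inside an RCU read-side section, so an object whose count has already reached zero can never be revived, and the final put defers the kfree() through call_rcu() until all readers are done. A compact userspace sketch of the same two helpers, using C11 atomics; rcu_defer_free() here is a stand-in for the kernel's call_rcu() and is assumed, not real API:

	/* Sketch of the refcount pattern adopted above: get refuses to
	 * revive a dying object, put defers the free past all readers. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct gw { atomic_int refcount; /* ... payload ... */ };

	/* assumed helper: really frees only after current readers finish */
	static void rcu_defer_free(struct gw *g) { free(g); }

	static bool gw_get(struct gw *g)
	{
		int c = atomic_load(&g->refcount);
		/* inc-not-zero: once the count hits 0 the object is dead */
		while (c != 0)
			if (atomic_compare_exchange_weak(&g->refcount, &c, c + 1))
				return true;
		return false;
	}

	static void gw_put(struct gw *g)
	{
		if (atomic_fetch_sub(&g->refcount, 1) == 1)
			rcu_defer_free(g);   /* last reference: defer the free */
	}

The zero check is what makes concurrent teardown safe: a reader that loses the race simply behaves as if the node had already been removed from the list.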
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,8 +31,8 @@
 #include <linux/if_arp.h>
 
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);
 
 static int batman_skb_recv(struct sk_buff *skb,
@@ -40,33 +40,31 @@ static int batman_skb_recv(struct sk_buff *skb,
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev);
 
-static void hardif_free_rcu(struct rcu_head *rcu)
+void hardif_free_rcu(struct rcu_head *rcu)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
-	batman_if = container_of(rcu, struct batman_if, rcu);
-	dev_put(batman_if->net_dev);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hard_iface = container_of(rcu, struct hard_iface, rcu);
+	dev_put(hard_iface->net_dev);
+	kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->net_dev == net_dev)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->net_dev == net_dev &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -81,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
 		return 0;
 
 	/* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-		return 0;
-#else
-	if (net_dev->hard_start_xmit == interface_tx)
+	if (softif_is_valid(net_dev))
 		return 0;
-#endif
 
 	/* Device is being bridged */
 	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -96,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
 	return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		if (batman_if->if_status == IF_ACTIVE)
+		if (hard_iface->if_status == IF_ACTIVE &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -132,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-			   struct batman_if *batman_if)
+			   struct hard_iface *hard_iface)
 {
 	struct batman_packet *batman_packet;
-	struct batman_if *old_if;
+	struct hard_iface *old_if;
 
-	if (batman_if)
-		kref_get(&batman_if->refcount);
+	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
 
 	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = batman_if;
+	bat_priv->primary_if = hard_iface;
 
 	if (old_if)
-		kref_put(&old_if->refcount, hardif_free_ref);
+		hardif_free_ref(old_if);
 
 	if (!bat_priv->primary_if)
 		return;
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
@@ -162,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
 	atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-	if (batman_if->net_dev->flags & IFF_UP)
+	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
 
 	return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->net_dev == net_dev)
+		if (hard_iface->net_dev == net_dev)
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr,
-				  net_dev->dev_addr))
+		if (!compare_eth(hard_iface->net_dev->dev_addr,
+				 net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
 			   "on: %s\n", net_dev->dev_addr,
-			   batman_if->net_dev->name);
+			   hard_iface->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -207,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -216,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
 		goto out;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->soft_iface != soft_iface)
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
 				min_mtu);
 	}
 	rcu_read_unlock();
@@ -242,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
 	soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	update_mac_addresses(batman_if);
-	batman_if->if_status = IF_TO_BE_ACTIVATED;
+	update_mac_addresses(hard_iface);
+	hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
 	/**
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
 	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, batman_if);
+		set_primary_if(bat_priv, hard_iface);
 
-	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 	return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-	if ((batman_if->if_status != IF_ACTIVE) &&
-	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	if ((hard_iface->if_status != IF_ACTIVE) &&
+	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 		return;
 
-	batman_if->if_status = IF_INACTIVE;
+	hard_iface->if_status = IF_INACTIVE;
 
-	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
+	struct net_device *soft_iface;
+	int ret;
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		goto out;
 
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
+		goto out;
+
-	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+	soft_iface = dev_get_by_name(&init_net, iface_name);
 
-	if (!batman_if->soft_iface) {
-		batman_if->soft_iface = softif_create(iface_name);
+	if (!soft_iface) {
+		soft_iface = softif_create(iface_name);
 
-		if (!batman_if->soft_iface)
+		if (!soft_iface) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		/* dev_get_by_name() increases the reference counter for us */
-		dev_hold(batman_if->soft_iface);
+		dev_hold(soft_iface);
+	}
+
+	if (!softif_is_valid(soft_iface)) {
+		pr_err("Can't create batman mesh interface %s: "
+		       "already exists as regular interface\n",
+		       soft_iface->name);
+		dev_put(soft_iface);
+		ret = -EINVAL;
+		goto err;
 	}
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
-	batman_if->packet_len = BAT_PACKET_LEN;
-	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+	hard_iface->soft_iface = soft_iface;
+	bat_priv = netdev_priv(hard_iface->soft_iface);
+	hard_iface->packet_len = BAT_PACKET_LEN;
+	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-	if (!batman_if->packet_buff) {
-		bat_err(batman_if->soft_iface, "Can't add interface packet "
-			"(%s): out of memory\n", batman_if->net_dev->name);
+	if (!hard_iface->packet_buff) {
+		bat_err(hard_iface->soft_iface, "Can't add interface packet "
+			"(%s): out of memory\n", hard_iface->net_dev->name);
+		ret = -ENOMEM;
 		goto err;
 	}
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
 	batman_packet->flags = 0;
@@ -320,107 +329,107 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 	batman_packet->tq = TQ_MAX_VALUE;
 	batman_packet->num_hna = 0;
 
-	batman_if->if_num = bat_priv->num_ifaces;
+	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
-	batman_if->if_status = IF_INACTIVE;
-	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+	hard_iface->if_status = IF_INACTIVE;
+	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-	batman_if->batman_adv_ptype.func = batman_skb_recv;
-	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-	kref_get(&batman_if->refcount);
-	dev_add_pack(&batman_if->batman_adv_ptype);
+	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+	hard_iface->batman_adv_ptype.func = batman_skb_recv;
+	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&batman_if->seqno, 1);
-	atomic_set(&batman_if->frag_seqno, 1);
-	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-		 batman_if->net_dev->name);
+	atomic_set(&hard_iface->seqno, 1);
+	atomic_set(&hard_iface->frag_seqno, 1);
+	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
		 hard_iface->net_dev->name);
 
-	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			"The MTU of interface %s is too small (%i) to handle "
 			"the transport of batman-adv packets. Packets going "
 			"over this interface will be fragmented on layer2 "
 			"which could impact the performance. Setting the MTU "
 			"to %zi would solve the problem.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
+			hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			"The MTU of interface %s is too small (%i) to handle "
 			"the transport of batman-adv packets. If you experience"
 			" problems getting traffic through try increasing the "
 			"MTU to %zi.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
+			hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (hardif_is_iface_up(batman_if))
-		hardif_activate_interface(batman_if);
+	if (hardif_is_iface_up(hard_iface))
+		hardif_activate_interface(hard_iface);
 	else
-		bat_err(batman_if->soft_iface, "Not using interface %s "
+		bat_err(hard_iface->soft_iface, "Not using interface %s "
 			"(retrying later): interface not active\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 
 	/* begin scheduling originator messages on that interface */
-	schedule_own_packet(batman_if);
+	schedule_own_packet(hard_iface);
 
 out:
 	return 0;
 
 err:
-	return -ENOMEM;
+	hardif_free_ref(hard_iface);
+	return ret;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (batman_if->if_status == IF_ACTIVE)
-		hardif_deactivate_interface(batman_if);
+	if (hard_iface->if_status == IF_ACTIVE)
+		hardif_deactivate_interface(hard_iface);
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-		 batman_if->net_dev->name);
-	dev_remove_pack(&batman_if->batman_adv_ptype);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+		 hard_iface->net_dev->name);
+	dev_remove_pack(&hard_iface->batman_adv_ptype);
 
 	bat_priv->num_ifaces--;
-	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (batman_if == bat_priv->primary_if) {
-		struct batman_if *new_if;
+	if (hard_iface == bat_priv->primary_if) {
+		struct hard_iface *new_if;
 
-		new_if = get_active_batman_if(batman_if->soft_iface);
+		new_if = hardif_get_active(hard_iface->soft_iface);
 		set_primary_if(bat_priv, new_if);
 
 		if (new_if)
-			kref_put(&new_if->refcount, hardif_free_ref);
+			hardif_free_ref(new_if);
 	}
 
-	kfree(batman_if->packet_buff);
-	batman_if->packet_buff = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
 
-	/* delete all references to this batman_if */
+	/* delete all references to this hard_iface */
 	purge_orig_ref(bat_priv);
-	purge_outstanding_packets(bat_priv, batman_if);
-	dev_put(batman_if->soft_iface);
+	purge_outstanding_packets(bat_priv, hard_iface);
+	dev_put(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces)
-		softif_destroy(batman_if->soft_iface);
+		softif_destroy(hard_iface->soft_iface);
 
-	batman_if->soft_iface = NULL;
+	hard_iface->soft_iface = NULL;
+	hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
 	ret = is_valid_iface(net_dev);
@@ -429,73 +438,73 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 	dev_hold(net_dev);
 
-	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-	if (!batman_if) {
+	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
 		goto release_dev;
 	}
 
-	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
 	if (ret)
 		goto free_if;
 
-	batman_if->if_num = -1;
-	batman_if->net_dev = net_dev;
-	batman_if->soft_iface = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-	INIT_LIST_HEAD(&batman_if->list);
-	kref_init(&batman_if->refcount);
+	hard_iface->if_num = -1;
+	hard_iface->net_dev = net_dev;
+	hard_iface->soft_iface = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
+	INIT_LIST_HEAD(&hard_iface->list);
+	/* extra reference for return */
+	atomic_set(&hard_iface->refcount, 2);
 
-	check_known_mac_addr(batman_if->net_dev);
+	check_known_mac_addr(hard_iface->net_dev);
 
-	spin_lock(&if_list_lock);
-	list_add_tail_rcu(&batman_if->list, &if_list);
-	spin_unlock(&if_list_lock);
+	spin_lock(&hardif_list_lock);
+	list_add_tail_rcu(&hard_iface->list, &hardif_list);
+	spin_unlock(&hardif_list_lock);
 
-	/* extra reference for return */
-	kref_get(&batman_if->refcount);
-
-	return batman_if;
+	return hard_iface;
 
 free_if:
-	kfree(batman_if);
+	kfree(hard_iface);
 release_dev:
 	dev_put(net_dev);
 out:
 	return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
 	/* first deactivate interface */
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		hardif_disable_interface(batman_if);
+	if (hard_iface->if_status != IF_NOT_IN_USE)
+		hardif_disable_interface(hard_iface);
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		return;
 
-	batman_if->if_status = IF_TO_BE_REMOVED;
-	sysfs_del_hardif(&batman_if->hardif_obj);
-	call_rcu(&batman_if->rcu, hardif_free_rcu);
+	hard_iface->if_status = IF_TO_BE_REMOVED;
+	sysfs_del_hardif(&hard_iface->hardif_obj);
+	hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-	struct batman_if *batman_if, *batman_if_tmp;
+	struct hard_iface *hard_iface, *hard_iface_tmp;
 	struct list_head if_queue;
 
 	INIT_LIST_HEAD(&if_queue);
 
-	spin_lock(&if_list_lock);
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-		list_del_rcu(&batman_if->list);
-		list_add_tail(&batman_if->list, &if_queue);
+	spin_lock(&hardif_list_lock);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+				 &hardif_list, list) {
+		list_del_rcu(&hard_iface->list);
+		list_add_tail(&hard_iface->list, &if_queue);
 	}
-	spin_unlock(&if_list_lock);
+	spin_unlock(&hardif_list_lock);
 
 	rtnl_lock();
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-		hardif_remove_interface(batman_if);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+				 &if_queue, list) {
+		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
 }
@@ -504,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct bat_priv *bat_priv;
 
-	if (!batman_if && event == NETDEV_REGISTER)
-		batman_if = hardif_add_interface(net_dev);
+	if (!hard_iface && event == NETDEV_REGISTER)
+		hard_iface = hardif_add_interface(net_dev);
 
-	if (!batman_if)
+	if (!hard_iface)
 		goto out;
 
 	switch (event) {
 	case NETDEV_UP:
-		hardif_activate_interface(batman_if);
+		hardif_activate_interface(hard_iface);
 		break;
 	case NETDEV_GOING_DOWN:
 	case NETDEV_DOWN:
-		hardif_deactivate_interface(batman_if);
+		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&if_list_lock);
-		list_del_rcu(&batman_if->list);
-		spin_unlock(&if_list_lock);
+		spin_lock(&hardif_list_lock);
+		list_del_rcu(&hard_iface->list);
+		spin_unlock(&hardif_list_lock);
 
-		hardif_remove_interface(batman_if);
+		hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (batman_if->soft_iface)
-			update_min_mtu(batman_if->soft_iface);
+		if (hard_iface->soft_iface)
+			update_min_mtu(hard_iface->soft_iface);
 		break;
 	case NETDEV_CHANGEADDR:
-		if (batman_if->if_status == IF_NOT_IN_USE)
+		if (hard_iface->if_status == IF_NOT_IN_USE)
 			goto hardif_put;
 
-		check_known_mac_addr(batman_if->net_dev);
-		update_mac_addresses(batman_if);
+		check_known_mac_addr(hard_iface->net_dev);
+		update_mac_addresses(hard_iface);
 
-		bat_priv = netdev_priv(batman_if->soft_iface);
-		if (batman_if == bat_priv->primary_if)
+		bat_priv = netdev_priv(hard_iface->soft_iface);
+		if (hard_iface == bat_priv->primary_if)
 			update_primary_addr(bat_priv);
 		break;
 	default:
@@ -548,7 +557,7 @@ static int hard_if_event(struct notifier_block *this,
 	};
 
 hardif_put:
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 out:
 	return NOTIFY_DONE;
 }
@@ -561,10 +570,10 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
-	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
 	skb = skb_share_check(skb, GFP_ATOMIC);
 
 	/* skb was released by skb_share_check() */
@@ -580,16 +589,16 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	    || !skb_mac_header(skb)))
 		goto err_free;
 
-	if (!batman_if->soft_iface)
+	if (!hard_iface->soft_iface)
 		goto err_free;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto err_free;
 
 	/* discard frames on not active interfaces */
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto err_free;
 
 	batman_packet = (struct batman_packet *)skb->data;
@@ -607,32 +616,32 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	switch (batman_packet->packet_type) {
 		/* batman originator packet */
 	case BAT_PACKET:
-		ret = recv_bat_packet(skb, batman_if);
+		ret = recv_bat_packet(skb, hard_iface);
 		break;
 
 		/* batman icmp packet */
 	case BAT_ICMP:
-		ret = recv_icmp_packet(skb, batman_if);
+		ret = recv_icmp_packet(skb, hard_iface);
 		break;
 
 		/* unicast packet */
 	case BAT_UNICAST:
-		ret = recv_unicast_packet(skb, batman_if);
+		ret = recv_unicast_packet(skb, hard_iface);
 		break;
 
 		/* fragmented unicast packet */
 	case BAT_UNICAST_FRAG:
-		ret = recv_ucast_frag_packet(skb, batman_if);
+		ret = recv_ucast_frag_packet(skb, hard_iface);
 		break;
 
 		/* broadcast packet */
 	case BAT_BCAST:
-		ret = recv_bcast_packet(skb, batman_if);
+		ret = recv_bcast_packet(skb, hard_iface);
 		break;
 
 		/* vis packet */
 	case BAT_VIS:
-		ret = recv_vis_packet(skb, batman_if);
+		ret = recv_vis_packet(skb, hard_iface);
 		break;
 	default:
 		ret = NET_RX_DROP;
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -31,19 +31,18 @@
 
 extern struct notifier_block hard_if_notifier;
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
 int hardif_min_mtu(struct net_device *soft_iface);
 void update_min_mtu(struct net_device *soft_iface);
+void hardif_free_rcu(struct rcu_head *rcu);
 
-static inline void hardif_free_ref(struct kref *refcount)
+static inline void hardif_free_ref(struct hard_iface *hard_iface)
 {
-	struct batman_if *batman_if;
-
-	batman_if = container_of(refcount, struct batman_if, refcount);
-	kfree(batman_if);
+	if (atomic_dec_and_test(&hard_iface->refcount))
+		call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
 {
 	int i;
 
-	for (i = 0 ; i < hash->size; i++)
+	for (i = 0 ; i < hash->size; i++) {
 		INIT_HLIST_HEAD(&hash->table[i]);
+		spin_lock_init(&hash->list_locks[i]);
+	}
 }
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash)
 {
+	kfree(hash->list_locks);
 	kfree(hash->table);
 	kfree(hash);
 }
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size)
 {
 	struct hashtable_t *hash;
 
-	hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
+	hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
 	if (!hash)
 		return NULL;
 
-	hash->size = size;
 	hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+	if (!hash->table)
+		goto free_hash;
 
-	if (!hash->table) {
-		kfree(hash);
-		return NULL;
-	}
+	hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+	if (!hash->list_locks)
+		goto free_table;
 
+	hash->size = size;
 	hash_init(hash);
-
 	return hash;
+
+free_table:
+	kfree(hash->table);
+free_hash:
+	kfree(hash);
+	return NULL;
 }
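hash_new() above now allocates a second array (the per-bucket spinlocks) and unwinds through free_table:/free_hash: labels in reverse allocation order when anything fails. A self-contained userspace sketch of that staged-unwind pattern (struct and field names are made up for illustration; an int array stands in for the spinlock array):

	#include <stdlib.h>

	struct table {
		void **buckets;
		int *locks;          /* stand-in for the per-bucket lock array */
		int size;
	};

	static struct table *table_new(int size)
	{
		struct table *t = malloc(sizeof(*t));

		if (!t)
			return NULL;

		t->buckets = calloc(size, sizeof(*t->buckets));
		if (!t->buckets)
			goto free_table;

		t->locks = calloc(size, sizeof(*t->locks));
		if (!t->locks)
			goto free_buckets;

		t->size = size;
		return t;

		/* unwind in reverse order of allocation */
	free_buckets:
		free(t->buckets);
	free_table:
		free(t);
		return NULL;
	}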
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,21 +28,17 @@
  * compare 2 element datas for their keys,
  * return 0 if same and not 0 if not
  * same */
-typedef int (*hashdata_compare_cb)(void *, void *);
+typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
 
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
 typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
-	void *data;		/* pointer to the data */
-	struct hlist_node hlist;	/* bucket list pointer */
-};
+typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
-	struct hlist_head *table;   /* the hashtable itself, with the buckets */
+	struct hlist_head *table;   /* the hashtable itself with the buckets */
+	spinlock_t *list_locks;     /* spinlock for each hash list entry */
 	int size;		    /* size of hashtable */
 };
@@ -59,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash,
 			       hashdata_free_cb free_cb, void *arg)
 {
 	struct hlist_head *head;
-	struct hlist_node *walk, *safe;
-	struct element_t *bucket;
+	struct hlist_node *node, *node_tmp;
+	spinlock_t *list_lock; /* spinlock to protect write access */
 	int i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
-		hlist_for_each_safe(walk, safe, head) {
-			bucket = hlist_entry(walk, struct element_t, hlist);
-			if (free_cb)
-				free_cb(bucket->data, arg);
+		spin_lock_bh(list_lock);
+		hlist_for_each_safe(node, node_tmp, head) {
+			hlist_del_rcu(node);
 
-			hlist_del(walk);
-			kfree(bucket);
+			if (free_cb)
+				free_cb(node, arg);
 		}
+		spin_unlock_bh(list_lock);
 	}
 
 	hash_destroy(hash);
@@ -82,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash,
 /* adds data to the hashtable. returns 0 on success, -1 on error */
 static inline int hash_add(struct hashtable_t *hash,
 			   hashdata_compare_cb compare,
-			   hashdata_choose_cb choose, void *data)
+			   hashdata_choose_cb choose,
+			   void *data, struct hlist_node *data_node)
 {
 	int index;
 	struct hlist_head *head;
-	struct hlist_node *walk, *safe;
-	struct element_t *bucket;
+	struct hlist_node *node;
+	spinlock_t *list_lock; /* spinlock to protect write access */
 
 	if (!hash)
-		return -1;
+		goto err;
 
 	index = choose(data, hash->size);
 	head = &hash->table[index];
+	list_lock = &hash->list_locks[index];
 
-	hlist_for_each_safe(walk, safe, head) {
-		bucket = hlist_entry(walk, struct element_t, hlist);
-		if (compare(bucket->data, data))
-			return -1;
+	rcu_read_lock();
+	__hlist_for_each_rcu(node, head) {
+		if (!compare(node, data))
+			continue;
+
+		goto err_unlock;
 	}
+	rcu_read_unlock();
 
 	/* no duplicate found in list, add new element */
-	bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-	if (!bucket)
-		return -1;
-
-	bucket->data = data;
-	hlist_add_head(&bucket->hlist, head);
+	spin_lock_bh(list_lock);
+	hlist_add_head_rcu(data_node, head);
+	spin_unlock_bh(list_lock);
 
 	return 0;
+
+err_unlock:
+	rcu_read_unlock();
+err:
+	return -1;
 }
 
 /* removes data from hash, if found. returns pointer do data on success, so you
@@ -122,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash,
 				hashdata_choose_cb choose, void *data)
 {
 	size_t index;
-	struct hlist_node *walk;
-	struct element_t *bucket;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	void *data_save;
+	void *data_save = NULL;
 
 	index = choose(data, hash->size);
 	head = &hash->table[index];
 
-	hlist_for_each_entry(bucket, walk, head, hlist) {
-		if (compare(bucket->data, data)) {
-			data_save = bucket->data;
-			hlist_del(walk);
-			kfree(bucket);
-			return data_save;
-		}
-	}
-
-	return NULL;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-static inline void *hash_find(struct hashtable_t *hash,
-			      hashdata_compare_cb compare,
-			      hashdata_choose_cb choose, void *keydata)
-{
-	int index;
-	struct hlist_head *head;
-	struct hlist_node *walk;
-	struct element_t *bucket;
-
-	if (!hash)
-		return NULL;
-
-	index = choose(keydata, hash->size);
-	head = &hash->table[index];
-
-	hlist_for_each(walk, head) {
-		bucket = hlist_entry(walk, struct element_t, hlist);
-		if (compare(bucket->data, keydata))
-			return bucket->data;
+	spin_lock_bh(&hash->list_locks[index]);
+	hlist_for_each(node, head) {
+		if (!compare(node, data))
+			continue;
+
+		data_save = node;
+		hlist_del_rcu(node);
+		break;
 	}
+	spin_unlock_bh(&hash->list_locks[index]);
 
-	return NULL;
+	return data_save;
 }
 
 #endif /* _NET_BATMAN_ADV_HASH_H_ */
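The hash.h rewrite above drops the heap-allocated struct element_t wrapper: each stored object now embeds its own hlist_node (the diff threads it in as the new data_node argument, e.g. &orig_node->hash_entry) and the table links those nodes directly, which removes one allocation per entry and lets lookups run under RCU. A minimal userspace sketch of the intrusive-node idea, with container_of recovering the object from its embedded link (names are illustrative):

	#include <stddef.h>
	#include <stdio.h>

	/* container_of as used in the kernel: recover the outer struct
	 * from a pointer to one of its members */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct node { struct node *next; };   /* intrusive link */

	struct orig {                         /* the stored object */
		int id;
		struct node hash_entry;       /* embedded: no wrapper alloc */
	};

	int main(void)
	{
		struct orig o = { .id = 42 };
		struct node *n = &o.hash_entry;   /* what the table stores */

		struct orig *back = container_of(n, struct orig, hash_entry);
		printf("recovered id=%d\n", back->id);
		return 0;
	}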
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -156,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	struct sk_buff *skb;
 	struct icmp_packet_rr *icmp_packet;
 
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
 	size_t packet_len = sizeof(struct icmp_packet);
-	uint8_t dstaddr[ETH_ALEN];
 
 	if (len < sizeof(struct icmp_packet)) {
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -219,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dst_unreach;
 
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   compare_orig, choose_orig,
-						   icmp_packet->dst));
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
 
 	if (!orig_node)
 		goto unlock;
 
-	if (!orig_node->router)
+	neigh_node = orig_node->router;
+
+	if (!neigh_node)
 		goto unlock;
 
-	batman_if = orig_node->router->if_incoming;
-	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+		neigh_node = NULL;
+		goto unlock;
+	}
 
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	rcu_read_unlock();
 
-	if (!batman_if)
+	if (!neigh_node->if_incoming)
 		goto dst_unreach;
 
-	if (batman_if->if_status != IF_ACTIVE)
+	if (neigh_node->if_incoming->if_status != IF_ACTIVE)
 		goto dst_unreach;
 
 	memcpy(icmp_packet->orig,
 	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 
 	if (packet_len == sizeof(struct icmp_packet_rr))
-		memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-	send_skb_packet(skb, batman_if, dstaddr);
+		memcpy(icmp_packet->rr,
+		       neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
 
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	goto out;
 
 unlock:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	rcu_read_unlock();
 dst_unreach:
 	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
 	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 free_skb:
 	kfree_skb(skb);
 out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return len;
 }
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,7 +33,7 @@
 #include "vis.h"
 #include "hash.h"
 
-struct list_head if_list;
+struct list_head hardif_list;
 
 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -41,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue;
 
 static int __init batman_init(void)
 {
-	INIT_LIST_HEAD(&if_list);
+	INIT_LIST_HEAD(&hardif_list);
 
 	/* the name should not be longer than 10 chars - see
 	 * http://lwn.net/Articles/23634/ */
@@ -79,7 +79,6 @@ int mesh_init(struct net_device *soft_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 
-	spin_lock_init(&bat_priv->orig_hash_lock);
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
 	spin_lock_init(&bat_priv->hna_lhash_lock);
@@ -154,14 +153,14 @@ void dec_module_count(void)
 
 int is_my_mac(uint8_t *addr)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->if_status != IF_ACTIVE)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->if_status != IF_ACTIVE)
 			continue;
 
-		if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
+		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
 			rcu_read_unlock();
 			return 1;
 		}
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -122,7 +122,7 @@
 #define REVISION_VERSION_STR " "REVISION_VERSION
 #endif
 
-extern struct list_head if_list;
+extern struct list_head hardif_list;
 
 extern unsigned char broadcast_addr[];
 extern struct workqueue_struct *bat_event_workqueue;
@@ -165,4 +165,14 @@ static inline void bat_dbg(char type __always_unused,
 		pr_err("%s: " fmt, _netdev->name, ## arg); \
 	} while (0)
 
+/**
+ * returns 1 if they are the same ethernet addr
+ *
+ * note: can't use compare_ether_addr() as it requires aligned memory
+ */
+static inline int compare_eth(void *data1, void *data2)
+{
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
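compare_eth(), added above, deliberately uses memcmp() rather than the kernel's compare_ether_addr(): as its own comment notes, the latter requires aligned memory, while batman-adv compares MAC fields that may sit at odd offsets inside packed packet structs. A small self-contained demo of the same byte-wise comparison:

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	/* byte-wise comparison: safe regardless of the addresses' alignment */
	static int compare_eth(const void *data1, const void *data2)
	{
		return memcmp(data1, data2, ETH_ALEN) == 0;
	}

	int main(void)
	{
		unsigned char a[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
		unsigned char b[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};

		printf("same=%d\n", compare_eth(a, b));   /* prints same=1 */
		return 0;
	}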
net/batman-adv/originator.c
View file @
b8cec4a4
...
...
@@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv)
if
(
bat_priv
->
orig_hash
)
return
1
;
spin_lock_bh
(
&
bat_priv
->
orig_hash_lock
);
bat_priv
->
orig_hash
=
hash_new
(
1024
);
if
(
!
bat_priv
->
orig_hash
)
goto
err
;
spin_unlock_bh
(
&
bat_priv
->
orig_hash_lock
);
start_purge_timer
(
bat_priv
);
return
1
;
err:
spin_unlock_bh
(
&
bat_priv
->
orig_hash_lock
);
return
0
;
}
struct
neigh_node
*
create_neighbor
(
struct
orig_node
*
orig_node
,
struct
orig_node
*
orig_neigh_node
,
uint8_t
*
neigh
,
struct
batman_if
*
if_incoming
)
static
void
neigh_node_free_rcu
(
struct
rcu_head
*
rcu
)
{
struct
neigh_node
*
neigh_node
;
neigh_node
=
container_of
(
rcu
,
struct
neigh_node
,
rcu
);
kfree
(
neigh_node
);
}
void
neigh_node_free_ref
(
struct
neigh_node
*
neigh_node
)
{
if
(
atomic_dec_and_test
(
&
neigh_node
->
refcount
))
call_rcu
(
&
neigh_node
->
rcu
,
neigh_node_free_rcu
);
}
struct
neigh_node
*
create_neighbor
(
struct
orig_node
*
orig_node
,
struct
orig_node
*
orig_neigh_node
,
uint8_t
*
neigh
,
struct
hard_iface
*
if_incoming
)
{
struct
bat_priv
*
bat_priv
=
netdev_priv
(
if_incoming
->
soft_iface
);
struct
neigh_node
*
neigh_node
;
...
...
@@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
if
(
!
neigh_node
)
return
NULL
;
INIT_LIST_HEAD
(
&
neigh_node
->
list
);
INIT_HLIST_NODE
(
&
neigh_node
->
list
);
INIT_LIST_HEAD
(
&
neigh_node
->
bonding_list
);
memcpy
(
neigh_node
->
addr
,
neigh
,
ETH_ALEN
);
neigh_node
->
orig_node
=
orig_neigh_node
;
neigh_node
->
if_incoming
=
if_incoming
;
list_add_tail
(
&
neigh_node
->
list
,
&
orig_node
->
neigh_list
);
/* extra reference for return */
atomic_set
(
&
neigh_node
->
refcount
,
2
);
spin_lock_bh
(
&
orig_node
->
neigh_list_lock
);
hlist_add_head_rcu
(
&
neigh_node
->
list
,
&
orig_node
->
neigh_list
);
spin_unlock_bh
(
&
orig_node
->
neigh_list_lock
);
return
neigh_node
;
}
static
void
free_orig_node
(
void
*
data
,
void
*
arg
)
static
void
orig_node_free_rcu
(
struct
rcu_head
*
rcu
)
{
struct
list_head
*
list_pos
,
*
list_pos_tmp
;
struct
neigh_node
*
neigh_node
;
struct
orig_node
*
orig_node
=
(
struct
orig_node
*
)
data
;
struct
bat_priv
*
bat_priv
=
(
struct
bat_priv
*
)
arg
;
struct
hlist_node
*
node
,
*
node_tmp
;
struct
neigh_node
*
neigh_node
,
*
tmp_neigh_node
;
struct
orig_node
*
orig_node
;
/* for all neighbors towards this originator ... */
list_for_each_safe
(
list_pos
,
list_pos_tmp
,
&
orig_node
->
neigh_list
)
{
neigh_node
=
list_entry
(
list_pos
,
struct
neigh_node
,
list
);
orig_node
=
container_of
(
rcu
,
struct
orig_node
,
rcu
);
list_del
(
list_pos
);
kfree
(
neigh_node
);
spin_lock_bh
(
&
orig_node
->
neigh_list_lock
);
/* for all bonding members ... */
list_for_each_entry_safe
(
neigh_node
,
tmp_neigh_node
,
&
orig_node
->
bond_list
,
bonding_list
)
{
list_del_rcu
(
&
neigh_node
->
bonding_list
);
neigh_node_free_ref
(
neigh_node
);
}
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe
(
neigh_node
,
node
,
node_tmp
,
&
orig_node
->
neigh_list
,
list
)
{
hlist_del_rcu
(
&
neigh_node
->
list
);
neigh_node_free_ref
(
neigh_node
);
}
spin_unlock_bh
(
&
orig_node
->
neigh_list_lock
);
frag_list_free
(
&
orig_node
->
frag_list
);
hna_global_del_orig
(
bat_priv
,
orig_node
,
"originator timed out"
);
hna_global_del_orig
(
orig_node
->
bat_priv
,
orig_node
,
"originator timed out"
);
kfree
(
orig_node
->
bcast_own
);
kfree
(
orig_node
->
bcast_own_sum
);
kfree
(
orig_node
);
}
void
orig_node_free_ref
(
struct
orig_node
*
orig_node
)
{
if
(
atomic_dec_and_test
(
&
orig_node
->
refcount
))
call_rcu
(
&
orig_node
->
rcu
,
orig_node_free_rcu
);
}
 void originator_free(struct bat_priv *bat_priv)
 {
-	if (!bat_priv->orig_hash)
+	struct hashtable_t *hash = bat_priv->orig_hash;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* spinlock to protect write access */
+	struct orig_node *orig_node;
+	int i;
+
+	if (!hash)
 		return;

 	cancel_delayed_work_sync(&bat_priv->orig_work);
-
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
 	bat_priv->orig_hash = NULL;
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			orig_node_free_ref(orig_node);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
 }
/* this function finds or creates an originator entry for the given
...
...
@@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	int size;
 	int hash_added;

-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   compare_orig, choose_orig,
-						   addr));
-
+	orig_node = orig_hash_find(bat_priv, addr);
 	if (orig_node)
 		return orig_node;
...
...
@@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	if (!orig_node)
 		return NULL;

-	INIT_LIST_HEAD(&orig_node->neigh_list);
+	INIT_HLIST_HEAD(&orig_node->neigh_list);
 	INIT_LIST_HEAD(&orig_node->bond_list);
 	spin_lock_init(&orig_node->ogm_cnt_lock);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
+	spin_lock_init(&orig_node->neigh_list_lock);
+
+	/* extra reference for return */
+	atomic_set(&orig_node->refcount, 2);

+	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	orig_node->router = NULL;
 	orig_node->hna_buff = NULL;
...
...
@@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	orig_node->batman_seqno_reset = jiffies - 1
 					- msecs_to_jiffies(RESET_PROTECTION_MS);

+	atomic_set(&orig_node->bond_candidates, 0);
+
 	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

 	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
...
...
@@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	if (!orig_node->bcast_own_sum)
 		goto free_bcast_own;

 	hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
-			      orig_node);
+			      orig_node, &orig_node->hash_entry);
 	if (hash_added < 0)
 		goto free_bcast_own_sum;
...
...
@@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
 			 struct neigh_node **best_neigh_node)
 {
-	struct list_head *list_pos, *list_pos_tmp;
+	struct hlist_node *node, *node_tmp;
 	struct neigh_node *neigh_node;
 	bool neigh_purged = false;

 	*best_neigh_node = NULL;

+	spin_lock_bh(&orig_node->neigh_list_lock);
+
 	/* for all neighbors towards this originator ... */
-	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-		neigh_node = list_entry(list_pos, struct neigh_node, list);
+	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+				  &orig_node->neigh_list, list) {

 		if ((time_after(jiffies,
 			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
 		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
 		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
 		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

-			if (neigh_node->if_incoming->if_status ==
-							IF_TO_BE_REMOVED)
+			if ((neigh_node->if_incoming->if_status ==
+								IF_INACTIVE) ||
+			    (neigh_node->if_incoming->if_status ==
+							IF_NOT_IN_USE) ||
+			    (neigh_node->if_incoming->if_status ==
+							IF_TO_BE_REMOVED))
 				bat_dbg(DBG_BATMAN, bat_priv,
 					"neighbor purge: originator %pM, "
 					"neighbor: %pM, iface: %s\n",
...
...
@@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
 					(neigh_node->last_valid / HZ));

 			neigh_purged = true;
-			list_del(list_pos);
-			kfree(neigh_node);
+
+			hlist_del_rcu(&neigh_node->list);
+			bonding_candidate_del(orig_node, neigh_node);
+			neigh_node_free_ref(neigh_node);
 		} else {
 			if ((!*best_neigh_node) ||
 			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
 				*best_neigh_node = neigh_node;
 		}
 	}
+
+	spin_unlock_bh(&orig_node->neigh_list_lock);
 	return neigh_purged;
 }
...
...
@@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 						best_neigh_node,
 						orig_node->hna_buff,
 						orig_node->hna_buff_len);
-			/* update bonding candidates, we could have lost
-			 * some candidates. */
-			update_bonding_candidates(orig_node);
 		}
 	}
...
...
@@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 static void _purge_orig(struct bat_priv *bat_priv)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk, *safe;
+	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct element_t *bucket;
+	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct orig_node *orig_node;
 	int i;

 	if (!hash)
 		return;

-	spin_lock_bh(&bat_priv->orig_hash_lock);
-
 	/* for all origins... */
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];

-		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-			orig_node = bucket->data;
-
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+					  head, hash_entry) {
 			if (purge_orig_node(bat_priv, orig_node)) {
 				if (orig_node->gw_flags)
 					gw_node_delete(bat_priv, orig_node);
-				hlist_del(walk);
-				kfree(bucket);
-				free_orig_node(orig_node, bat_priv);
+				hlist_del_rcu(node);
+				orig_node_free_ref(orig_node);
 				continue;
 			}

 			if (time_after(jiffies, orig_node->last_frag_packet +
 						msecs_to_jiffies(FRAG_TIMEOUT)))
 				frag_list_free(&orig_node->frag_list);
 		}
+		spin_unlock_bh(list_lock);
 	}

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
-
 	gw_node_purge(bat_priv);
 	gw_election(bat_priv);
...
...
@@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk;
+	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	struct orig_node *orig_node;
 	struct neigh_node *neigh_node;
 	int batman_count = 0;
...
...
@@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
"Originator"
,
"last-seen"
,
"#"
,
TQ_MAX_VALUE
,
"Nexthop"
,
"outgoingIF"
,
"Potential nexthops"
);
spin_lock_bh
(
&
bat_priv
->
orig_hash_lock
);
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
orig_node
=
bucket
->
data
;
rcu_read_lock
();
hlist_for_each_entry_rcu
(
orig_node
,
node
,
head
,
hash_entry
)
{
if
(
!
orig_node
->
router
)
continue
;
...
...
@@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 				   neigh_node->addr,
 				   neigh_node->if_incoming->net_dev->name);

-			list_for_each_entry(neigh_node, &orig_node->neigh_list,
-					    list) {
+			hlist_for_each_entry_rcu(neigh_node, node_tmp,
+						 &orig_node->neigh_list, list) {
 				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
 					   neigh_node->tq_avg);
 			}
...
...
@@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 			seq_printf(seq, "\n");
 			batman_count++;
 		}
+		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
-
 	if ((batman_count == 0))
 		seq_printf(seq, "No batman nodes in range ...\n");
...
...
@@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
 	return 0;
 }

-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	struct orig_node *orig_node;
-	int i;
+	int i, ret;

 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 	 * if_num */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			orig_node = bucket->data;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+			spin_lock_bh(&orig_node->ogm_cnt_lock);
+			ret = orig_node_add_if(orig_node, max_if_num);
+			spin_unlock_bh(&orig_node->ogm_cnt_lock);

-			if (orig_node_add_if(orig_node, max_if_num) == -1)
+			if (ret == -1)
 				goto err;
 		}
+		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
 	return 0;

 err:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	rcu_read_unlock();
 	return -ENOMEM;
 }
...
...
@@ -508,57 +572,55 @@ static int orig_node_del_if(struct orig_node *orig_node,
 	return 0;
 }

-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
-	struct batman_if *batman_if_tmp;
+	struct hard_iface *hard_iface_tmp;
 	struct orig_node *orig_node;
 	int i, ret;

 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 	 * if_num */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			orig_node = bucket->data;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = orig_node_del_if(orig_node, max_if_num,
-					batman_if->if_num);
+					       hard_iface->if_num);
+			spin_unlock_bh(&orig_node->ogm_cnt_lock);

 			if (ret == -1)
 				goto err;
 		}
+		rcu_read_unlock();
 	}

 	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
-		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
+		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
 			continue;

-		if (batman_if == batman_if_tmp)
+		if (hard_iface == hard_iface_tmp)
 			continue;

-		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
 			continue;

-		if (batman_if_tmp->if_num > batman_if->if_num)
-			batman_if_tmp->if_num--;
+		if (hard_iface_tmp->if_num > hard_iface->if_num)
+			hard_iface_tmp->if_num--;
 	}
 	rcu_read_unlock();

-	batman_if->if_num = -1;
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	hard_iface->if_num = -1;
 	return 0;

 err:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	rcu_read_unlock();
 	return -ENOMEM;
 }
net/batman-adv/originator.h
View file @
b8cec4a4
...
...
@@ -22,21 +22,28 @@
 #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
 #define _NET_BATMAN_ADV_ORIGINATOR_H_

+#include "hash.h"
+
 int originator_init(struct bat_priv *bat_priv);
 void originator_free(struct bat_priv *bat_priv);
 void purge_orig_ref(struct bat_priv *bat_priv);
+void orig_node_free_ref(struct orig_node *orig_node);
 struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
-				   uint8_t *neigh, struct batman_if *if_incoming);
+				   uint8_t *neigh,
+				   struct hard_iface *if_incoming);
+void neigh_node_free_ref(struct neigh_node *neigh_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);

 /* returns 1 if they are the same originator */
-static inline int compare_orig(void *data1, void *data2)
+static inline int compare_orig(struct hlist_node *node, void *data2)
 {
+	void *data1 = container_of(node, struct orig_node, hash_entry);
+
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
...
...
@@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size)
 	return hash % size;
 }

+static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
+					       void *data)
+{
+	struct hashtable_t *hash = bat_priv->orig_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct orig_node *orig_node, *orig_node_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+		if (!compare_eth(orig_node, data))
+			continue;
+
+		if (!atomic_inc_not_zero(&orig_node->refcount))
+			continue;
+
+		orig_node_tmp = orig_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	return orig_node_tmp;
+}
+
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
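orig_hash_find() above hands back a referenced object, so every successful lookup must be balanced by orig_node_free_ref() once the caller is done. A sketch of that calling convention; the example function itself is made up, only orig_hash_find()/orig_node_free_ref() come from the patch:

static void example_lookup(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;

	orig_node = orig_hash_find(bat_priv, addr);
	if (!orig_node)
		return;	/* not found: no reference was taken */

	/* orig_node may be dereferenced safely here, even outside
	 * rcu_read_lock(), because the refcount pins it */

	orig_node_free_ref(orig_node);
}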
net/batman-adv/routing.c
View file @
b8cec4a4
...
...
@@ -35,35 +35,33 @@
 #include "gateway_client.h"
 #include "unicast.h"

-void slide_own_bcast_window(struct batman_if *batman_if)
+void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	struct orig_node *orig_node;
 	unsigned long *word;
 	int i;
 	size_t word_index;

-	spin_lock_bh(&bat_priv->orig_hash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			orig_node = bucket->data;
-			word_index = batman_if->if_num * NUM_WORDS;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+			spin_lock_bh(&orig_node->ogm_cnt_lock);
+			word_index = hard_iface->if_num * NUM_WORDS;
 			word = &(orig_node->bcast_own[word_index]);

 			bit_get_packet(bat_priv, word, 1, 0);
-			orig_node->bcast_own_sum[batman_if->if_num] =
+			orig_node->bcast_own_sum[hard_iface->if_num] =
 				bit_packet_count(word);
+			spin_unlock_bh(&orig_node->ogm_cnt_lock);
 		}
+		rcu_read_unlock();
 	}
-
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
 }

 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
...
...
@@ -89,6 +87,8 @@ static void update_route(struct bat_priv *bat_priv,
 			 struct neigh_node *neigh_node,
 			 unsigned char *hna_buff, int hna_buff_len)
 {
+	struct neigh_node *neigh_node_tmp;
+
 	/* route deleted */
 	if ((orig_node->router) && (!neigh_node)) {
...
@@ -115,7 +115,12 @@ static void update_route(struct bat_priv *bat_priv,
orig_node
->
router
->
addr
);
}
if
(
neigh_node
&&
!
atomic_inc_not_zero
(
&
neigh_node
->
refcount
))
neigh_node
=
NULL
;
neigh_node_tmp
=
orig_node
->
router
;
orig_node
->
router
=
neigh_node
;
if
(
neigh_node_tmp
)
neigh_node_free_ref
(
neigh_node_tmp
);
}
...
...
@@ -138,73 +143,93 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 static int is_bidirectional_neigh(struct orig_node *orig_node,
 				  struct orig_node *orig_neigh_node,
 				  struct batman_packet *batman_packet,
-				  struct batman_if *if_incoming)
+				  struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+	struct hlist_node *node;
 	unsigned char total_count;
+	uint8_t orig_eq_count, neigh_rq_count, tq_own;
+	int tq_asym_penalty, ret = 0;

 	if (orig_node == orig_neigh_node) {
-		list_for_each_entry(tmp_neigh_node,
-				    &orig_node->neigh_list,
-				    list) {
-
-			if (compare_orig(tmp_neigh_node->addr,
-					 orig_neigh_node->orig) &&
-			    (tmp_neigh_node->if_incoming == if_incoming))
-				neigh_node = tmp_neigh_node;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tmp_neigh_node, node,
+					 &orig_node->neigh_list, list) {
+
+			if (!compare_eth(tmp_neigh_node->addr,
+					 orig_neigh_node->orig))
+				continue;
+
+			if (tmp_neigh_node->if_incoming != if_incoming)
+				continue;
+
+			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+				continue;
+
+			neigh_node = tmp_neigh_node;
 		}
+		rcu_read_unlock();

 		if (!neigh_node)
 			neigh_node = create_neighbor(orig_node,
 						     orig_neigh_node,
 						     orig_neigh_node->orig,
 						     if_incoming);
-		/* create_neighbor failed, return 0 */
 		if (!neigh_node)
-			return 0;
+			goto out;

 		neigh_node->last_valid = jiffies;
 	} else {
 		/* find packet count of corresponding one hop neighbor */
-		list_for_each_entry(tmp_neigh_node,
-				    &orig_neigh_node->neigh_list, list) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tmp_neigh_node, node,
+					 &orig_neigh_node->neigh_list, list) {

-			if (compare_orig(tmp_neigh_node->addr,
-					 orig_neigh_node->orig) &&
-			    (tmp_neigh_node->if_incoming == if_incoming))
-				neigh_node = tmp_neigh_node;
+			if (!compare_eth(tmp_neigh_node->addr,
+					 orig_neigh_node->orig))
+				continue;
+
+			if (tmp_neigh_node->if_incoming != if_incoming)
+				continue;
+
+			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+				continue;
+
+			neigh_node = tmp_neigh_node;
 		}
+		rcu_read_unlock();

 		if (!neigh_node)
 			neigh_node = create_neighbor(orig_neigh_node,
 						     orig_neigh_node,
 						     orig_neigh_node->orig,
 						     if_incoming);
-		/* create_neighbor failed, return 0 */
 		if (!neigh_node)
-			return 0;
+			goto out;
 	}

 	orig_node->last_valid = jiffies;

+	spin_lock_bh(&orig_node->ogm_cnt_lock);
+	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
+	neigh_rq_count = neigh_node->real_packet_count;
+	spin_unlock_bh(&orig_node->ogm_cnt_lock);
+
 	/* pay attention to not get a value bigger than 100 % */
-	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
-		       neigh_node->real_packet_count ?
-		       neigh_node->real_packet_count :
-		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
+	total_count = (orig_eq_count > neigh_rq_count ?
+		       neigh_rq_count : orig_eq_count);

-	/* if we have too few packets (too less data) we set tq_own to zero */
+	/* if we receive too few packets it is not considered bidirectional */
 	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
-	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
-		orig_neigh_node->tq_own = 0;
+	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+		tq_own = 0;
 	else
 		/* neigh_node->real_packet_count is never zero as we
 		 * only purge old information when getting new
 		 * information */
-		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
-			neigh_node->real_packet_count;
+		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

 	/*
 	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
...
...
@@ -212,19 +237,15 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
 	 * punishes asymmetric links more. This will give a value
 	 * between 0 and TQ_MAX_VALUE
 	 */
-	orig_neigh_node->tq_asym_penalty =
-		TQ_MAX_VALUE -
-		(TQ_MAX_VALUE *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
-		(TQ_LOCAL_WINDOW_SIZE *
-		 TQ_LOCAL_WINDOW_SIZE *
-		 TQ_LOCAL_WINDOW_SIZE);
-
-	batman_packet->tq = ((batman_packet->tq *
-			      orig_neigh_node->tq_own *
-			      orig_neigh_node->tq_asym_penalty) /
-			     (TQ_MAX_VALUE * TQ_MAX_VALUE));
+	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
+			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
+			  (TQ_LOCAL_WINDOW_SIZE *
+			   TQ_LOCAL_WINDOW_SIZE *
+			   TQ_LOCAL_WINDOW_SIZE);
+
+	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
+			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

 	bat_dbg(DBG_BATMAN, bat_priv,
...
...
@@ -233,34 +254,141 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
"total tq: %3i
\n
"
,
orig_node
->
orig
,
orig_neigh_node
->
orig
,
total_count
,
neigh_node
->
real_packet_count
,
orig_neigh_node
->
tq_own
,
orig_neigh_node
->
tq_asym_penalty
,
batman_packet
->
tq
);
neigh_rq_count
,
tq_own
,
tq_asym_penalty
,
batman_packet
->
tq
);
/* if link has the minimum required transmission quality
* consider it bidirectional */
if
(
batman_packet
->
tq
>=
TQ_TOTAL_BIDRECT_LIMIT
)
ret
urn
1
;
ret
=
1
;
return
0
;
out:
if
(
neigh_node
)
neigh_node_free_ref
(
neigh_node
);
return
ret
;
}
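To make the TQ arithmetic above concrete, here is a small userspace rendition using the constants batman-adv defines in main.h (TQ_MAX_VALUE 255, TQ_LOCAL_WINDOW_SIZE 64); the packet counts are invented sample values, not taken from the commit:

#include <stdio.h>

#define TQ_MAX_VALUE		255
#define TQ_LOCAL_WINDOW_SIZE	64

int main(void)
{
	unsigned total_count = 40;	/* min(orig_eq_count, neigh_rq_count) */
	unsigned neigh_rq_count = 50;	/* OGMs received from the neighbor */
	unsigned recv_tq = 255;		/* tq field of the incoming OGM */

	unsigned tq_own = TQ_MAX_VALUE * total_count / neigh_rq_count;
	unsigned miss = TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
	unsigned tq_asym_penalty = TQ_MAX_VALUE -
		TQ_MAX_VALUE * miss * miss * miss /
		(TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);
	unsigned tq = recv_tq * tq_own * tq_asym_penalty /
		      (TQ_MAX_VALUE * TQ_MAX_VALUE);

	/* prints: tq_own=204 asym_penalty=253 tq=202 */
	printf("tq_own=%u asym_penalty=%u tq=%u\n",
	       tq_own, tq_asym_penalty, tq);
	return 0;
}

The cubic term is what the "1 - ((1-x) ** 3)" comment refers to: the fewer of the last TQ_LOCAL_WINDOW_SIZE OGMs were heard from the neighbor, the harder the asymmetry penalty bites.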
+/* caller must hold the neigh_list_lock */
+void bonding_candidate_del(struct orig_node *orig_node,
+			   struct neigh_node *neigh_node)
+{
+	/* this neighbor is not part of our candidate list */
+	if (list_empty(&neigh_node->bonding_list))
+		goto out;
+
+	list_del_rcu(&neigh_node->bonding_list);
+	INIT_LIST_HEAD(&neigh_node->bonding_list);
+	neigh_node_free_ref(neigh_node);
+	atomic_dec(&orig_node->bond_candidates);
+
+out:
+	return;
+}
+
+static void bonding_candidate_add(struct orig_node *orig_node,
+				  struct neigh_node *neigh_node)
+{
+	struct hlist_node *node;
+	struct neigh_node *tmp_neigh_node;
+	uint8_t best_tq, interference_candidate = 0;
+
+	spin_lock_bh(&orig_node->neigh_list_lock);
+
+	/* only consider if it has the same primary address ... */
+	if (!compare_eth(orig_node->orig,
+			 neigh_node->orig_node->primary_addr))
+		goto candidate_del;
+
+	if (!orig_node->router)
+		goto candidate_del;
+
+	best_tq = orig_node->router->tq_avg;
+
+	/* ... and is good enough to be considered */
+	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
+		goto candidate_del;
+
+	/**
+	 * check if we have another candidate with the same mac address or
+	 * interface. If we do, we won't select this candidate because of
+	 * possible interference.
+	 */
+	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+				 &orig_node->neigh_list, list) {
+
+		if (tmp_neigh_node == neigh_node)
+			continue;
+
+		/* we only care if the other candidate is even
+		 * considered as candidate. */
+		if (list_empty(&tmp_neigh_node->bonding_list))
+			continue;
+
+		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
+		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
+			interference_candidate = 1;
+			break;
+		}
+	}
+
+	/* don't care further if it is an interference candidate */
+	if (interference_candidate)
+		goto candidate_del;
+
+	/* this neighbor already is part of our candidate list */
+	if (!list_empty(&neigh_node->bonding_list))
+		goto out;
+
+	if (!atomic_inc_not_zero(&neigh_node->refcount))
+		goto out;
+
+	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
+	atomic_inc(&orig_node->bond_candidates);
+	goto out;
+
+candidate_del:
+	bonding_candidate_del(orig_node, neigh_node);
+
+out:
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+	return;
+}
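Condensing the admission logic of bonding_candidate_add() into one predicate may help when reading the function above. This restatement is hypothetical (pure logic, no locking or list handling); only BONDING_TQ_THRESHOLD carries its value from batman-adv's main.h:

#define BONDING_TQ_THRESHOLD	50	/* value from batman-adv main.h */

/* same_primary: neighbor's originator shares our primary address
 * interference: another candidate uses the same iface or MAC */
static int is_bond_candidate(unsigned char neigh_tq, unsigned char best_tq,
			     int same_primary, int interference)
{
	if (!same_primary)
		return 0;	/* must lead to the same originator */
	if (neigh_tq + BONDING_TQ_THRESHOLD < best_tq)
		return 0;	/* too far below the best route's TQ */
	if (interference)
		return 0;	/* would share a path with a candidate */
	return 1;
}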
+/* copy primary address for bonding */
+static void bonding_save_primary(struct orig_node *orig_node,
+				 struct orig_node *orig_neigh_node,
+				 struct batman_packet *batman_packet)
+{
+	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+		return;
+
+	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
+}
 static void update_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node,
 			struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			struct batman_if *if_incoming,
+			struct hard_iface *if_incoming,
 			unsigned char *hna_buff, int hna_buff_len,
 			char is_duplicate)
 {
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+	struct orig_node *orig_node_tmp;
+	struct hlist_node *node;
 	int tmp_hna_buff_len;
+	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

 	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
 		"Searching and updating originator entry of received packet\n");

-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
-		    (tmp_neigh_node->if_incoming == if_incoming)) {
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+				 &orig_node->neigh_list, list) {
+		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+		    (tmp_neigh_node->if_incoming == if_incoming) &&
+		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+			if (neigh_node)
+				neigh_node_free_ref(neigh_node);
 			neigh_node = tmp_neigh_node;
 			continue;
 		}
...
...
@@ -279,16 +407,20 @@ static void update_orig(struct bat_priv *bat_priv,
 		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
 		if (!orig_tmp)
-			return;
+			goto unlock;

 		neigh_node = create_neighbor(orig_node, orig_tmp,
 					     ethhdr->h_source, if_incoming);
+
+		orig_node_free_ref(orig_tmp);
 		if (!neigh_node)
-			return;
+			goto unlock;
 	} else
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Updating existing last-hop neighbor of originator\n");

+	rcu_read_unlock();
+
 	orig_node->flags = batman_packet->flags;
 	neigh_node->last_valid = jiffies;
...
...
@@ -302,6 +434,8 @@ static void update_orig(struct bat_priv *bat_priv,
 		neigh_node->last_ttl = batman_packet->ttl;
 	}

+	bonding_candidate_add(orig_node, neigh_node);
+
 	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
 			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);
...
...
@@ -318,10 +452,22 @@ static void update_orig(struct bat_priv *bat_priv,
 	/* if the TQ is the same and the link not more symetric we
 	 * won't consider it either */
 	if ((orig_node->router) &&
-	     ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
-	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
-	      >=
-	      neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
+	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
+		orig_node_tmp = orig_node->router->orig_node;
+		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+		bcast_own_sum_orig =
+			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+		orig_node_tmp = neigh_node->orig_node;
+		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+		bcast_own_sum_neigh =
+			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+			goto update_hna;
+	}

 	update_routes(bat_priv, orig_node, neigh_node,
 		      hna_buff, tmp_hna_buff_len);
...
...
@@ -342,6 +488,14 @@ static void update_orig(struct bat_priv *bat_priv,
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
 	    (atomic_read(&bat_priv->gw_sel_class) > 2))
 		gw_check_election(bat_priv, orig_node);
+
+	goto out;
+
+unlock:
+	rcu_read_unlock();
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
 }

 /* checks whether the host restarted and is in the protection time.
...
...
@@ -379,34 +533,38 @@ static int window_protected(struct bat_priv *bat_priv,
  */
 static char count_real_packets(struct ethhdr *ethhdr,
 			       struct batman_packet *batman_packet,
-			       struct batman_if *if_incoming)
+			       struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct orig_node *orig_node;
 	struct neigh_node *tmp_neigh_node;
+	struct hlist_node *node;
 	char is_duplicate = 0;
 	int32_t seq_diff;
 	int need_update = 0;
-	int set_mark;
+	int set_mark, ret = -1;

 	orig_node = get_orig_node(bat_priv, batman_packet->orig);
 	if (!orig_node)
 		return 0;

+	spin_lock_bh(&orig_node->ogm_cnt_lock);
 	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

 	/* signalize caller that the packet is to be dropped. */
 	if (window_protected(bat_priv, seq_diff,
 			     &orig_node->batman_seqno_reset))
-		return -1;
+		goto out;

-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+				 &orig_node->neigh_list, list) {

 		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
 					       orig_node->last_real_seqno,
 					       batman_packet->seqno);

-		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
+		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
 		    (tmp_neigh_node->if_incoming == if_incoming))
 			set_mark = 1;
 		else
...
...
@@ -420,6 +578,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
 			tmp_neigh_node->real_packet_count =
 				bit_packet_count(tmp_neigh_node->real_bits);
 	}
+	rcu_read_unlock();

 	if (need_update) {
 		bat_dbg(DBG_BATMAN, bat_priv,
...
...
@@ -428,121 +587,21 @@ static char count_real_packets(struct ethhdr *ethhdr,
 		orig_node->last_real_seqno = batman_packet->seqno;
 	}

-	return is_duplicate;
-}
-
-/* copy primary address for bonding */
-static void mark_bonding_address(struct orig_node *orig_node,
-				 struct orig_node *orig_neigh_node,
-				 struct batman_packet *batman_packet)
-{
-	if (batman_packet->flags & PRIMARIES_FIRST_HOP)
-		memcpy(orig_neigh_node->primary_addr,
-		       orig_node->orig, ETH_ALEN);
-
-	return;
-}
-
-/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct orig_node *orig_node)
-{
-	int candidates;
-	int interference_candidate;
-	int best_tq;
-	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
-	struct neigh_node *first_candidate, *last_candidate;
-
-	/* update the candidates for this originator */
-	if (!orig_node->router) {
-		orig_node->bond.candidates = 0;
-		return;
-	}
-
-	best_tq = orig_node->router->tq_avg;
-
-	/* update bond.candidates */
-	candidates = 0;
-
-	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
-	 * as "bonding partner" */
-
-	/* first, zero the list */
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-		tmp_neigh_node->next_bond_candidate = NULL;
-	}
-
-	first_candidate = NULL;
-	last_candidate = NULL;
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
-		/* only consider if it has the same primary address ... */
-		if (memcmp(orig_node->orig,
-				tmp_neigh_node->orig_node->primary_addr,
-				ETH_ALEN) != 0)
-			continue;
-
-		/* ... and is good enough to be considered */
-		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
-			continue;
-
-		/* check if we have another candidate with the same
-		 * mac address or interface. If we do, we won't
-		 * select this candidate because of possible interference. */
-		interference_candidate = 0;
-		list_for_each_entry(tmp_neigh_node2,
-				&orig_node->neigh_list, list) {
-
-			if (tmp_neigh_node2 == tmp_neigh_node)
-				continue;
-
-			/* we only care if the other candidate is even
-			 * considered as candidate. */
-			if (!tmp_neigh_node2->next_bond_candidate)
-				continue;
-
-			if ((tmp_neigh_node->if_incoming ==
-				tmp_neigh_node2->if_incoming)
-				|| (memcmp(tmp_neigh_node->addr,
-				tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
-
-				interference_candidate = 1;
-				break;
-			}
-		}
-		/* don't care further if it is an interference candidate */
-		if (interference_candidate)
-			continue;
-
-		if (!first_candidate) {
-			first_candidate = tmp_neigh_node;
-			tmp_neigh_node->next_bond_candidate = first_candidate;
-		} else
-			tmp_neigh_node->next_bond_candidate = last_candidate;
-
-		last_candidate = tmp_neigh_node;
-
-		candidates++;
-	}
-
-	if (candidates > 0) {
-		first_candidate->next_bond_candidate = last_candidate;
-		orig_node->bond.selected = first_candidate;
-	}
+	ret = is_duplicate;

-	orig_node->bond.candidates = candidates;
+out:
+	spin_unlock_bh(&orig_node->ogm_cnt_lock);
+	orig_node_free_ref(orig_node);
+	return ret;
 }
 void receive_bat_packet(struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
 			unsigned char *hna_buff, int hna_buff_len,
-			struct batman_if *if_incoming)
+			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct orig_node *orig_neigh_node, *orig_node;
 	char has_directlink_flag;
 	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
...
...
@@ -570,7 +629,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

-	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
-					    batman_packet->orig) ? 1 : 0);
+	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
+					   batman_packet->orig) ? 1 : 0);

 	bat_dbg(DBG_BATMAN, bat_priv,
...
...
@@ -584,26 +643,26 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		has_directlink_flag);

 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->if_status != IF_ACTIVE)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->if_status != IF_ACTIVE)
 			continue;

-		if (batman_if->soft_iface != if_incoming->soft_iface)
+		if (hard_iface->soft_iface != if_incoming->soft_iface)
 			continue;

-		if (compare_orig(ethhdr->h_source,
-				 batman_if->net_dev->dev_addr))
+		if (compare_eth(ethhdr->h_source,
+				hard_iface->net_dev->dev_addr))
 			is_my_addr = 1;

-		if (compare_orig(batman_packet->orig,
-				 batman_if->net_dev->dev_addr))
+		if (compare_eth(batman_packet->orig,
+				hard_iface->net_dev->dev_addr))
 			is_my_orig = 1;

-		if (compare_orig(batman_packet->prev_sender,
-				 batman_if->net_dev->dev_addr))
+		if (compare_eth(batman_packet->prev_sender,
+				hard_iface->net_dev->dev_addr))
 			is_my_oldorig = 1;

-		if (compare_orig(ethhdr->h_source, broadcast_addr))
+		if (compare_eth(ethhdr->h_source, broadcast_addr))
 			is_broadcast = 1;
 	}
 	rcu_read_unlock();
...
...
@@ -635,7 +694,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		int offset;

 		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
-
 		if (!orig_neigh_node)
 			return;
...
...
@@ -644,18 +702,22 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		/* if received seqno equals last send seqno save new
 		 * seqno for bidirectional check */
 		if (has_directlink_flag &&
-		    compare_orig(if_incoming->net_dev->dev_addr,
-				 batman_packet->orig) &&
+		    compare_eth(if_incoming->net_dev->dev_addr,
+				batman_packet->orig) &&
 		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
 			offset = if_incoming->if_num * NUM_WORDS;
+
+			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
 			word = &(orig_neigh_node->bcast_own[offset]);
 			bit_mark(word, 0);
 			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
 				bit_packet_count(word);
+			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
 		}

 		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
 			"originator packet from myself (via neighbor)\n");
+		orig_node_free_ref(orig_neigh_node);
 		return;
 	}
...
...
@@ -676,27 +738,27 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: packet within seqno protection time "
 			"(sender: %pM)\n", ethhdr->h_source);
-		return;
+		goto out;
 	}

 	if (batman_packet->tq == 0) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: originator packet with tq equal 0\n");
-		return;
+		goto out;
 	}

 	/* avoid temporary routing loops */
 	if ((orig_node->router) &&
 	    (orig_node->router->orig_node->router) &&
-	    (compare_orig(orig_node->router->addr,
-			  batman_packet->prev_sender)) &&
-	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
-	    (compare_orig(orig_node->router->addr,
-			  orig_node->router->orig_node->router->addr))) {
+	    (compare_eth(orig_node->router->addr,
+			 batman_packet->prev_sender)) &&
+	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
+	    (compare_eth(orig_node->router->addr,
+			 orig_node->router->orig_node->router->addr))) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: ignoring all rebroadcast packets that "
 			"may make me loop (sender: %pM)\n", ethhdr->h_source);
-		return;
+		goto out;
 	}

 	/* if sender is a direct neighbor the sender mac equals
...
...
@@ -705,19 +767,21 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		       orig_node :
 		       get_orig_node(bat_priv, ethhdr->h_source));
 	if (!orig_neigh_node)
-		return;
+		goto out;

 	/* drop packet if sender is not a direct neighbor and if we
 	 * don't route towards it */
 	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: OGM via unknown neighbor!\n");
-		return;
+		goto out_neigh;
 	}

 	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
 						  batman_packet, if_incoming);

+	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
+
 	/* update ranking if it is not a duplicate or has the same
 	 * seqno and similar ttl as the non-duplicate */
 	if (is_bidirectional &&
...
...
@@ -727,9 +791,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
 			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

-	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
-	update_bonding_candidates(orig_node);
-
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
...
...
@@ -739,31 +800,36 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
 			"rebroadcast neighbor packet with direct link flag\n");
-		return;
+		goto out_neigh;
 	}

 	/* multihop originator */
 	if (!is_bidirectional) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: not received via bidirectional link\n");
-		return;
+		goto out_neigh;
 	}

 	if (is_duplicate) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: duplicate packet received\n");
-		return;
+		goto out_neigh;
 	}

 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Forwarding packet: rebroadcast originator packet\n");
 	schedule_forward_packet(orig_node, ethhdr, batman_packet,
 				0, hna_buff_len, if_incoming);
+
+out_neigh:
+	if ((orig_neigh_node) && (!is_single_hop_neigh))
+		orig_node_free_ref(orig_neigh_node);
+out:
+	orig_node_free_ref(orig_node);
 }
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 	struct ethhdr *ethhdr;

 	/* drop packet if it has not necessary minimum size */
...
...
@@ -790,12 +856,10 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);

-	spin_lock_bh(&bat_priv->orig_hash_lock);
 	receive_aggr_bat_packet(ethhdr,
 				skb->data,
 				skb_headlen(skb),
-				batman_if);
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+				hard_iface);

 	kfree_skb(skb);
 	return NET_RX_SUCCESS;
...
...
@@ -804,42 +868,45 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
 			       struct sk_buff *skb, size_t icmp_len)
 {
-	struct orig_node *orig_node;
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
 	struct icmp_packet_rr *icmp_packet;
-	struct batman_if *batman_if;
-	int ret;
-	uint8_t dstaddr[ETH_ALEN];
+	int ret = NET_RX_DROP;

 	icmp_packet = (struct icmp_packet_rr *)skb->data;

 	/* add data to device queue */
 	if (icmp_packet->msg_type != ECHO_REQUEST) {
 		bat_socket_receive_packet(icmp_packet, icmp_len);
-		return NET_RX_DROP;
+		goto out;
 	}

 	if (!bat_priv->primary_if)
-		return NET_RX_DROP;
+		goto out;

 	/* answer echo request (ping) */
 	/* get routing information */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   compare_orig, choose_orig,
-						   icmp_packet->orig));
-	ret = NET_RX_DROP;
-
-	if ((orig_node) && (orig_node->router)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
-
-		icmp_packet = (struct icmp_packet_rr *)skb->data;
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+	if (!orig_node)
+		goto unlock;
+
+	neigh_node = orig_node->router;
+	if (!neigh_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+		neigh_node = NULL;
+		goto unlock;
+	}
+
+	rcu_read_unlock();
+
+	/* create a copy of the skb, if needed, to modify it. */
+	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+		goto out;
+
+	icmp_packet = (struct icmp_packet_rr *)skb->data;
...
...
@@ -849,23 +916,27 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
 	icmp_packet->msg_type = ECHO_REPLY;
 	icmp_packet->ttl = TTL;

-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = NET_RX_SUCCESS;
+	goto out;

-	} else
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
+unlock:
+	rcu_read_unlock();
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return ret;
 }
 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 				  struct sk_buff *skb)
 {
-	struct orig_node *orig_node;
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
 	struct icmp_packet *icmp_packet;
-	struct batman_if *batman_if;
-	int ret;
-	uint8_t dstaddr[ETH_ALEN];
+	int ret = NET_RX_DROP;

 	icmp_packet = (struct icmp_packet *)skb->data;
...
...
@@ -874,32 +945,36 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 		pr_debug("Warning - can't forward icmp packet from %pM to "
 			 "%pM: ttl exceeded\n", icmp_packet->orig,
 			 icmp_packet->dst);
-		return NET_RX_DROP;
+		goto out;
 	}

 	if (!bat_priv->primary_if)
-		return NET_RX_DROP;
+		goto out;

 	/* get routing information */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-			       icmp_packet->orig));
-	ret = NET_RX_DROP;
-
-	if ((orig_node) && (orig_node->router)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
-
-		icmp_packet = (struct icmp_packet *)skb->data;
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+	if (!orig_node)
+		goto unlock;
+
+	neigh_node = orig_node->router;
+	if (!neigh_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+		neigh_node = NULL;
+		goto unlock;
+	}
+
+	rcu_read_unlock();
+
+	/* create a copy of the skb, if needed, to modify it. */
+	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+		goto out;
+
+	icmp_packet = (struct icmp_packet *)skb->data;

 	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
 	memcpy(icmp_packet->orig,
...
...
@@ -907,26 +982,30 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 	icmp_packet->msg_type = TTL_EXCEEDED;
 	icmp_packet->ttl = TTL;

-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = NET_RX_SUCCESS;
+	goto out;

-	} else
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
+unlock:
+	rcu_read_unlock();
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return ret;
 }
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct icmp_packet_rr *icmp_packet;
 	struct ethhdr *ethhdr;
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
 	int hdr_size = sizeof(struct icmp_packet);
-	int ret;
-	uint8_t dstaddr[ETH_ALEN];
+	int ret = NET_RX_DROP;

 	/**
 	 * we truncate all incoming icmp packets if they don't match our size
...
...
@@ -936,21 +1015,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 	/* drop packet if it has not necessary minimum size */
 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
+		goto out;

 	ethhdr = (struct ethhdr *)skb_mac_header(skb);

 	/* packet with unicast indication but broadcast recipient */
 	if (is_broadcast_ether_addr(ethhdr->h_dest))
-		return NET_RX_DROP;
+		goto out;

 	/* packet with broadcast sender address */
 	if (is_broadcast_ether_addr(ethhdr->h_source))
-		return NET_RX_DROP;
+		goto out;

 	/* not for me */
 	if (!is_my_mac(ethhdr->h_dest))
-		return NET_RX_DROP;
+		goto out;

 	icmp_packet = (struct icmp_packet_rr *)skb->data;
...
...
@@ -970,25 +1049,28 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 	if (icmp_packet->ttl < 2)
 		return recv_icmp_ttl_exceeded(bat_priv, skb);

-	ret = NET_RX_DROP;
-
 	/* get routing information */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-			       icmp_packet->dst));
-
-	if ((orig_node) && (orig_node->router)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
+	if (!orig_node)
+		goto unlock;
+
+	neigh_node = orig_node->router;
+	if (!neigh_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+		neigh_node = NULL;
+		goto unlock;
+	}
+
+	rcu_read_unlock();
+
+	/* create a copy of the skb, if needed, to modify it. */
+	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+		goto out;

 	icmp_packet = (struct icmp_packet_rr *)skb->data;
...
...
@@ -996,24 +1078,30 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 	icmp_packet->ttl--;

 	/* route it */
-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = NET_RX_SUCCESS;
+	goto out;

-	} else
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
+unlock:
+	rcu_read_unlock();
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return ret;
 }
 /* find a suitable router for this originator, and use
- * bonding if possible. */
+ * bonding if possible. increases the found neighbors
+ * refcount.*/
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node,
-			       struct batman_if *recv_if)
+			       struct hard_iface *recv_if)
 {
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
-	struct neigh_node *router, *first_candidate, *best_router;
+	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
 	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 	int bonding_enabled;
...
...
@@ -1025,78 +1113,128 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 	/* without bonding, the first node should
 	 * always choose the default router. */
 	bonding_enabled = atomic_read(&bat_priv->bonding);

-	if ((!recv_if) && (!bonding_enabled))
-		return orig_node->router;
-
+	rcu_read_lock();
+	/* select default router to output */
+	router = orig_node->router;
 	router_orig = orig_node->router->orig_node;
+	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
+	if ((!recv_if) && (!bonding_enabled))
+		goto return_router;

 	/* if we have something in the primary_addr, we can search
 	 * for a potential bonding candidate. */
-	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
-		return orig_node->router;
+	if (compare_eth(router_orig->primary_addr, zero_mac))
+		goto return_router;

 	/* find the orig_node which has the primary interface. might
 	 * even be the same as our router_orig in many cases */
-	if (memcmp(router_orig->primary_addr,
-				router_orig->orig, ETH_ALEN) == 0) {
+	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
 		primary_orig_node = router_orig;
 	} else {
-		primary_orig_node = hash_find(bat_priv->orig_hash,
-					      compare_orig, choose_orig,
-					      router_orig->primary_addr);
-
+		primary_orig_node = orig_hash_find(bat_priv,
+						   router_orig->primary_addr);
 		if (!primary_orig_node)
-			return orig_node->router;
+			goto return_router;
+
+		orig_node_free_ref(primary_orig_node);
 	}

 	/* with less than 2 candidates, we can't do any
 	 * bonding and prefer the original router. */
-	if (primary_orig_node->bond.candidates < 2)
-		return orig_node->router;
+	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
+		goto return_router;

 	/* all nodes between should choose a candidate which
 	 * is is not on the interface where the packet came
 	 * in. */
-	first_candidate = primary_orig_node->bond.selected;
-	router = first_candidate;
+
+	neigh_node_free_ref(router);
+	first_candidate = NULL;
+	router = NULL;

 	if (bonding_enabled) {
 		/* in the bonding case, send the packets in a round
 		 * robin fashion over the remaining interfaces. */
-		do {
+		list_for_each_entry_rcu(tmp_neigh_node,
+				&primary_orig_node->bond_list, bonding_list) {
+			if (!first_candidate)
+				first_candidate = tmp_neigh_node;
+
 			/* recv_if == NULL on the first node. */
-			if (router->if_incoming != recv_if)
+			if (tmp_neigh_node->if_incoming != recv_if &&
+			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+				router = tmp_neigh_node;
 				break;
+			}
+		}

-			router = router->next_bond_candidate;
-		} while (router != first_candidate);
+		/* use the first candidate if nothing was found. */
+		if (!router && first_candidate &&
+		    atomic_inc_not_zero(&first_candidate->refcount))
+			router = first_candidate;

-		primary_orig_node->bond.selected = router->next_bond_candidate;
+		if (!router) {
+			rcu_read_unlock();
+			return NULL;
+		}
+
+		/* selected should point to the next element
+		 * after the current router */
+		spin_lock_bh(&primary_orig_node->neigh_list_lock);
+		/* this is a list_move(), which unfortunately
+		 * does not exist as rcu version */
+		list_del_rcu(&primary_orig_node->bond_list);
+		list_add_rcu(&primary_orig_node->bond_list,
+			     &router->bonding_list);
+		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

 	} else {
 		/* if bonding is disabled, use the best of the
 		 * remaining candidates which are not using
 		 * this interface. */
-		best_router = first_candidate;
+		list_for_each_entry_rcu(tmp_neigh_node,
+			&primary_orig_node->bond_list, bonding_list) {
+			if (!first_candidate)
+				first_candidate = tmp_neigh_node;

-		do {
 			/* recv_if == NULL on the first node. */
-			if ((router->if_incoming != recv_if) &&
-				(router->tq_avg > best_router->tq_avg))
-					best_router = router;
+			if (tmp_neigh_node->if_incoming == recv_if)
+				continue;

-			router = router->next_bond_candidate;
-		} while (router != first_candidate);
+			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+				continue;

-		router = best_router;
+			/* if we don't have a router yet
+			 * or this one is better, choose it. */
+			if ((!router) ||
+			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
+				/* decrement refcount of
+				 * previously selected router */
+				if (router)
+					neigh_node_free_ref(router);
+
+				router = tmp_neigh_node;
+				atomic_inc_not_zero(&router->refcount);
+			}
+
+			neigh_node_free_ref(tmp_neigh_node);
+		}
+
+		/* use the first candidate if nothing was found. */
+		if (!router && first_candidate &&
+		    atomic_inc_not_zero(&first_candidate->refcount))
+			router = first_candidate;
 	}
-
+return_router:
+	rcu_read_unlock();
 	return router;
 }
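The round-robin half of find_router() rotates the bond_list head behind whichever candidate was just used, so the next traversal naturally starts at the following one. A toy, non-RCU model of that rotation; all names here are invented and the circular ring stands in for the real bond_list:

#include <stdio.h>

struct cand {
	const char *name;
	struct cand *next;
};

int main(void)
{
	struct cand a = { "A", NULL }, b = { "B", NULL }, c = { "C", NULL };
	struct cand *head = &a;

	a.next = &b;
	b.next = &c;
	c.next = &a;	/* circular ring of bonding candidates */

	/* five sends: A B C A B -- each next hop takes its turn */
	for (int i = 0; i < 5; i++) {
		printf("send via %s\n", head->name);
		head = head->next;	/* rotate past the hop just used */
	}
	return 0;
}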
...
...
@@ -1125,17 +1263,14 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
 	return 0;
 }

-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-			 int hdr_size)
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct orig_node *orig_node;
-	struct neigh_node *router;
-	struct batman_if *batman_if;
-	uint8_t dstaddr[ETH_ALEN];
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
 	struct unicast_packet *unicast_packet;
 	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-	int ret;
+	int ret = NET_RX_DROP;
 	struct sk_buff *new_skb;

 	unicast_packet = (struct unicast_packet *)skb->data;
...
...
@@ -1145,53 +1280,51 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
 		pr_debug("Warning - can't forward unicast packet from %pM to "
 			 "%pM: ttl exceeded\n", ethhdr->h_source,
 			 unicast_packet->dest);
-		return NET_RX_DROP;
+		goto out;
 	}

 	/* get routing information */
-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-			       unicast_packet->dest));
-
-	router = find_router(bat_priv, orig_node, recv_if);
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

-	if (!router) {
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-		return NET_RX_DROP;
-	}
+	if (!orig_node)
+		goto unlock;

-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
+	rcu_read_unlock();

-	batman_if = router->if_incoming;
-	memcpy(dstaddr, router->addr, ETH_ALEN);
+	/* find_router() increases neigh_nodes refcount if found. */
+	neigh_node = find_router(bat_priv, orig_node, recv_if);

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	if (!neigh_node)
+		goto out;

 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-		return NET_RX_DROP;
+		goto out;

 	unicast_packet = (struct unicast_packet *)skb->data;

 	if (unicast_packet->packet_type == BAT_UNICAST &&
 	    atomic_read(&bat_priv->fragmentation) &&
-	    skb->len > batman_if->net_dev->mtu)
-		return frag_send_skb(skb, bat_priv, batman_if, dstaddr);
+	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
+		ret = frag_send_skb(skb, bat_priv,
+				    neigh_node->if_incoming, neigh_node->addr);
+		goto out;
+	}

 	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
-	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
+	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

 		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

 		if (ret == NET_RX_DROP)
-			return NET_RX_DROP;
+			goto out;

 		/* packet was buffered for late merge */
-		if (!new_skb)
-			return NET_RX_SUCCESS;
+		if (!new_skb) {
+			ret = NET_RX_SUCCESS;
+			goto out;
+		}

 		skb = new_skb;
 		unicast_packet = (struct unicast_packet *)skb->data;
...
...
@@ -1201,12 +1334,21 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
 	unicast_packet->ttl--;

 	/* route it */
-	send_skb_packet(skb, batman_if, dstaddr);
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = NET_RX_SUCCESS;
+	goto out;

-	return NET_RX_SUCCESS;
+unlock:
+	rcu_read_unlock();
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	return ret;
 }
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct unicast_packet *unicast_packet;
 	int hdr_size = sizeof(struct unicast_packet);
...
...
@@ -1222,10 +1364,10 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
 		return NET_RX_SUCCESS;
 	}

-	return route_unicast_packet(skb, recv_if, hdr_size);
+	return route_unicast_packet(skb, recv_if);
 }

-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct unicast_frag_packet *unicast_packet;
...
...
@@ -1255,89 +1397,96 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
 		return NET_RX_SUCCESS;
 	}

-	return route_unicast_packet(skb, recv_if, hdr_size);
+	return route_unicast_packet(skb, recv_if);
 }

-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct orig_node *orig_node;
+	struct orig_node *orig_node = NULL;
 	struct bcast_packet *bcast_packet;
 	struct ethhdr *ethhdr;
 	int hdr_size = sizeof(struct bcast_packet);
+	int ret = NET_RX_DROP;
 	int32_t seq_diff;

 	/* drop packet if it has not necessary minimum size */
 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
+		goto out;

 	ethhdr = (struct ethhdr *)skb_mac_header(skb);

 	/* packet with broadcast indication but unicast recipient */
 	if (!is_broadcast_ether_addr(ethhdr->h_dest))
-		return NET_RX_DROP;
+		goto out;

 	/* packet with broadcast sender address */
 	if (is_broadcast_ether_addr(ethhdr->h_source))
-		return NET_RX_DROP;
+		goto out;

 	/* ignore broadcasts sent by myself */
 	if (is_my_mac(ethhdr->h_source))
-		return NET_RX_DROP;
+		goto out;

 	bcast_packet = (struct bcast_packet *)skb->data;

 	/* ignore broadcasts originated by myself */
 	if (is_my_mac(bcast_packet->orig))
-		return NET_RX_DROP;
+		goto out;

 	if (bcast_packet->ttl < 2)
-		return NET_RX_DROP;
+		goto out;

-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-			       bcast_packet->orig));
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

-	if (!orig_node) {
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-		return NET_RX_DROP;
-	}
+	if (!orig_node)
+		goto rcu_unlock;
+
+	rcu_read_unlock();
+
+	spin_lock_bh(&orig_node->bcast_seqno_lock);

 	/* check whether the packet is a duplicate */
-	if (get_bit_status(orig_node->bcast_bits,
-			   orig_node->last_bcast_seqno,
-			   ntohl(bcast_packet->seqno))) {
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-		return NET_RX_DROP;
-	}
+	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+			   ntohl(bcast_packet->seqno)))
+		goto spin_unlock;

 	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

 	/* check whether the packet is old and the host just restarted. */
 	if (window_protected(bat_priv, seq_diff,
-			     &orig_node->bcast_seqno_reset)) {
-		spin_unlock_bh(&bat_priv->orig_hash_lock);
-		return NET_RX_DROP;
-	}
+			     &orig_node->bcast_seqno_reset))
+		goto spin_unlock;

 	/* mark broadcast in flood history, update window position
 	 * if required. */
 	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
 		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	spin_unlock_bh(&orig_node->bcast_seqno_lock);
+
 	/* rebroadcast packet */
 	add_bcast_packet_to_list(bat_priv, skb);

 	/* broadcast for me */
 	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+	ret = NET_RX_SUCCESS;
+	goto out;

-	return NET_RX_SUCCESS;
+rcu_unlock:
+	rcu_read_unlock();
+	goto out;
+spin_unlock:
+	spin_unlock_bh(&orig_node->bcast_seqno_lock);
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	return ret;
 }
int
recv_vis_packet
(
struct
sk_buff
*
skb
,
struct
batman_if
*
recv_if
)
int
recv_vis_packet
(
struct
sk_buff
*
skb
,
struct
hard_iface
*
recv_if
)
{
struct
vis_packet
*
vis_packet
;
struct
ethhdr
*
ethhdr
;
...
...
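recv_bcast_packet() now serializes its duplicate check on the per-originator bcast_seqno_lock rather than the global orig_hash_lock it used to take. The check itself is a sliding bitmap window over broadcast sequence numbers. The following is a simplified, standalone version of that window logic; the real get_bit_status()/bit_get_packet() handle multi-word bitmaps and window resets (via window_protected() above), and WINDOW plus all names here are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>

#define WINDOW 64	/* assumed window size, not the kernel's constant */

struct bcast_window {
	uint64_t bits;		/* bit i set => seqno (last_seqno - i) was seen */
	uint32_t last_seqno;
};

/* returns true if seqno was already seen inside the window */
static bool bcast_seen_before(struct bcast_window *w, uint32_t seqno)
{
	int32_t diff = (int32_t)(seqno - w->last_seqno);

	if (diff <= 0) {
		uint32_t off = (uint32_t)-diff;

		if (off >= WINDOW)
			return true;	/* far in the past; the kernel lets
					 * window_protected() decide this case */
		bool seen = w->bits & (1ULL << off);
		w->bits |= 1ULL << off;
		return seen;
	}

	/* newer sequence number: slide the window forward and record it */
	w->bits = (diff >= WINDOW) ? 0 : w->bits << diff;
	w->bits |= 1ULL;
	w->last_seqno = seqno;
	return false;
}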
net/batman-adv/routing.h
View file @ b8cec4a4
...
...
@@ -22,24 +22,25 @@
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_

-void slide_own_bcast_window(struct batman_if *batman_if);
+void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
 			unsigned char *hna_buff, int hna_buff_len,
-			struct batman_if *if_incoming);
+			struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		   struct neigh_node *neigh_node, unsigned char *hna_buff,
 		   int hna_buff_len);
-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-			 int hdr_size);
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node,
-			       struct batman_if *recv_if);
-void update_bonding_candidates(struct orig_node *orig_node);
+			       struct orig_node *orig_node,
+			       struct hard_iface *recv_if);
+void bonding_candidate_del(struct orig_node *orig_node,
+			   struct neigh_node *neigh_node);

 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
net/batman-adv/send.c
View file @ b8cec4a4
...
...
@@ -56,20 +56,20 @@ static unsigned long forward_send_time(void)
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
 int send_skb_packet(struct sk_buff *skb,
-		    struct batman_if *batman_if,
+		    struct hard_iface *hard_iface,
 		    uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;

-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto send_skb_err;

-	if (unlikely(!batman_if->net_dev))
+	if (unlikely(!hard_iface->net_dev))
 		goto send_skb_err;

-	if (!(batman_if->net_dev->flags & IFF_UP)) {
+	if (!(hard_iface->net_dev->flags & IFF_UP)) {
 		pr_warning("Interface %s is not up - can't send packet via "
-			   "that interface!\n", batman_if->net_dev->name);
+			   "that interface!\n", hard_iface->net_dev->name);
 		goto send_skb_err;
 	}
...
...
@@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb_reset_mac_header(skb);

 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
 	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
...
...
@@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb->priority = TC_PRIO_CONTROL;
 	skb->protocol = __constant_htons(ETH_P_BATMAN);

-	skb->dev = batman_if->net_dev;
+	skb->dev = hard_iface->net_dev;

 	/* dev_queue_xmit() returns a negative result on error. However on
 	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
...
...
@@ -102,16 +102,16 @@ int send_skb_packet(struct sk_buff *skb,
 /* Send a packet to a given interface */
 static void send_packet_to_if(struct forw_packet *forw_packet,
-			      struct batman_if *batman_if)
+			      struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	char *fwd_str;
 	uint8_t packet_num;
 	int16_t buff_pos;
 	struct batman_packet *batman_packet;
 	struct sk_buff *skb;

-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		return;

 	packet_num = 0;
...
...
@@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
 		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-		    (forw_packet->if_incoming == batman_if))
+		    (forw_packet->if_incoming == hard_iface))
 			batman_packet->flags |= DIRECTLINK;
 		else
 			batman_packet->flags &= ~DIRECTLINK;
...
...
@@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			batman_if->net_dev->name, batman_if->net_dev->dev_addr);
+			hard_iface->net_dev->name,
+			hard_iface->net_dev->dev_addr);

 		buff_pos += sizeof(struct batman_packet) +
 			(batman_packet->num_hna * ETH_ALEN);
...
...
@@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* create clone because function is called more than once */
 	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
 	if (skb)
-		send_skb_packet(skb, batman_if, broadcast_addr);
+		send_skb_packet(skb, hard_iface, broadcast_addr);
 }

 /* send a batman packet */
 static void send_packet(struct forw_packet *forw_packet)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet =
...
...
@@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
 	/* broadcast on every interface */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;

-		send_packet_to_if(forw_packet, batman_if);
+		send_packet_to_if(forw_packet, hard_iface);
 	}
 	rcu_read_unlock();
 }

 static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct batman_if *batman_if)
+				  struct hard_iface *hard_iface)
 {
 	int new_len;
 	unsigned char *new_buff;
...
...
@@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
-		memcpy(new_buff, batman_if->packet_buff,
+		memcpy(new_buff, hard_iface->packet_buff,
 		       sizeof(struct batman_packet));
 		batman_packet = (struct batman_packet *)new_buff;
...
...
@@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		       new_buff + sizeof(struct batman_packet),
 		       new_len - sizeof(struct batman_packet));

-		kfree(batman_if->packet_buff);
-		batman_if->packet_buff = new_buff;
-		batman_if->packet_len = new_len;
+		kfree(hard_iface->packet_buff);
+		hard_iface->packet_buff = new_buff;
+		hard_iface->packet_len = new_len;
 	}
 }

-void schedule_own_packet(struct batman_if *batman_if)
+void schedule_own_packet(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;

-	if ((batman_if->if_status == IF_NOT_IN_USE) ||
-	    (batman_if->if_status == IF_TO_BE_REMOVED))
+	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
+	    (hard_iface->if_status == IF_TO_BE_REMOVED))
 		return;

 	vis_server = atomic_read(&bat_priv->vis_mode);
...
...
@@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
 	 * outdated packets (especially uninitialized mac addresses) in the
 	 * packet queue
 	 */
-	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-		batman_if->if_status = IF_ACTIVE;
+	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = IF_ACTIVE;

 	/* if local hna has changed and interface is a primary interface */
 	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (batman_if == bat_priv->primary_if))
-		rebuild_batman_packet(bat_priv, batman_if);
+	    (hard_iface == bat_priv->primary_if))
+		rebuild_batman_packet(bat_priv, hard_iface);

 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
 	 * rebuild_batman_packet()
 	 */
-	batman_packet = (struct batman_packet *)batman_if->packet_buff;
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

 	/* change sequence number to network order */
 	batman_packet->seqno =
-		htonl((uint32_t)atomic_read(&batman_if->seqno));
+		htonl((uint32_t)atomic_read(&hard_iface->seqno));

 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
 		batman_packet->flags &= ~VIS_SERVER;

-	if ((batman_if == bat_priv->primary_if) &&
+	if ((hard_iface == bat_priv->primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
 		batman_packet->gw_flags = 0;

-	atomic_inc(&batman_if->seqno);
+	atomic_inc(&hard_iface->seqno);

-	slide_own_bcast_window(batman_if);
+	slide_own_bcast_window(hard_iface);
 	send_time = own_send_time(bat_priv);
-	add_bat_packet_to_list(bat_priv, batman_if->packet_buff,
-			       batman_if->packet_len, batman_if, 1, send_time);
+	add_bat_packet_to_list(bat_priv, hard_iface->packet_buff,
+			       hard_iface->packet_len, hard_iface, 1, send_time);
 }

 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
 			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_incoming)
+			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	unsigned char in_tq, in_ttl, tq_avg = 0;
...
...
@@ -326,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

 		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
+		if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
 			batman_packet->tq = orig_node->router->tq_avg;

 			if (orig_node->router->last_ttl)
...
...
@@ -443,7 +444,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 static void send_outstanding_bcast_packet(struct work_struct *work)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
 	struct forw_packet *forw_packet =
...
...
@@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 	/* rebroadcast packet */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;

 		/* send a copy of the saved skb */
 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
-			send_skb_packet(skb1, batman_if, broadcast_addr);
+			send_skb_packet(skb1, hard_iface, broadcast_addr);
 	}

 	rcu_read_unlock();
...
...
@@ -521,15 +522,15 @@ void send_outstanding_bat_packet(struct work_struct *work)
 }

 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if)
+			       struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;

-	if (batman_if)
+	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets(): %s\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 	else
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets()\n");
...
...
@@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argmument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;

 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
...
...
@@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argmument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;

 		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
...
...
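Throughout send.c the per-interface loops now walk the renamed hardif_list under rcu_read_lock(), and send_packet_to_if() re-derives the DIRECTLINK flag per aggregated packet: bit i of forw_packet->direct_link_flags marks packet i in the aggregate, and the flag is set only when the packet leaves on the same interface it arrived on. A standalone sketch of that bookkeeping follows; struct names and values are illustrative, not batman-adv code.

#include <stdint.h>
#include <stdio.h>

struct forw_packet_sketch {
	uint32_t direct_link_flags;	/* one bit per packet in the aggregate */
	const void *if_incoming;
};

static int is_directlink(const struct forw_packet_sketch *fp,
			 uint8_t packet_num, const void *out_iface)
{
	return (fp->direct_link_flags & (1u << packet_num)) &&
	       (fp->if_incoming == out_iface);
}

int main(void)
{
	int iface_a, iface_b;	/* stand-ins for two hard interfaces */
	struct forw_packet_sketch fp = {
		.direct_link_flags = 0x5,	/* packets 0 and 2 */
		.if_incoming = &iface_a,
	};

	/* DIRECTLINK only when re-sent on the receiving interface */
	printf("%d %d\n",
	       is_directlink(&fp, 2, &iface_a),	/* 1 */
	       is_directlink(&fp, 2, &iface_b));	/* 0 */
	return 0;
}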
net/batman-adv/send.h
View file @ b8cec4a4
...
...
@@ -23,17 +23,17 @@
 #define _NET_BATMAN_ADV_SEND_H_

 int send_skb_packet(struct sk_buff *skb,
-		    struct batman_if *batman_if,
+		    struct hard_iface *hard_iface,
 		    uint8_t *dst_addr);
-void schedule_own_packet(struct batman_if *batman_if);
+void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
 			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_outgoing);
+			     struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if);
+			       struct hard_iface *hard_iface);

 #endif /* _NET_BATMAN_ADV_SEND_H_ */
net/batman-adv/soft-interface.c
View file @ b8cec4a4
...
...
@@ -29,14 +29,12 @@
 #include "hash.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "send.h"
 #include "bat_sysfs.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
-#include "routing.h"

 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
...
...
@@ -78,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 	return 0;
 }

-static void softif_neigh_free_ref(struct kref *refcount)
+static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
 	struct softif_neigh *softif_neigh;

-	softif_neigh = container_of(refcount, struct softif_neigh, refcount);
+	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
 	kfree(softif_neigh);
 }

-static void softif_neigh_free_rcu(struct rcu_head *rcu)
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 {
-	struct softif_neigh *softif_neigh;
-
-	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	if (atomic_dec_and_test(&softif_neigh->refcount))
+		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }

 void softif_neigh_purge(struct bat_priv *bat_priv)
...
...
@@ -118,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
 				softif_neigh->addr, softif_neigh->vid);
 			softif_neigh_tmp = bat_priv->softif_neigh;
 			bat_priv->softif_neigh = NULL;
-			kref_put(&softif_neigh_tmp->refcount,
-				 softif_neigh_free_ref);
+			softif_neigh_free_ref(softif_neigh_tmp);
 		}

-		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+		softif_neigh_free_ref(softif_neigh);
 	}

 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
...
...
@@ -137,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh, node,
 				 &bat_priv->softif_neigh_list, list) {
-		if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0)
+		if (!compare_eth(softif_neigh->addr, addr))
 			continue;

 		if (softif_neigh->vid != vid)
 			continue;

+		if (!atomic_inc_not_zero(&softif_neigh->refcount))
+			continue;
+
 		softif_neigh->last_seen = jiffies;
-		goto found;
+		goto out;
 	}

 	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
...
...
@@ -154,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 	memcpy(softif_neigh->addr, addr, ETH_ALEN);
 	softif_neigh->vid = vid;
 	softif_neigh->last_seen = jiffies;
-	kref_init(&softif_neigh->refcount);
+	/* initialize with 2 - caller decrements counter by one */
+	atomic_set(&softif_neigh->refcount, 2);

 	INIT_HLIST_NODE(&softif_neigh->list);
 	spin_lock_bh(&bat_priv->softif_neigh_lock);
 	hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);

-found:
-	kref_get(&softif_neigh->refcount);
+out:
 	rcu_read_unlock();
 	return softif_neigh;
...
...
@@ -174,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct softif_neigh *softif_neigh;
 	struct hlist_node *node;
-	size_t buf_size, pos;
-	char *buff;

 	if (!bat_priv->primary_if) {
 		return seq_printf(seq, "BATMAN mesh %s disabled - "
...
...
@@ -185,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 	seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);

-	buf_size = 1;
-	/* Estimate length for: "   xx:xx:xx:xx:xx:xx\n" */
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh, node,
-				 &bat_priv->softif_neigh_list, list)
-		buf_size += 30;
-	rcu_read_unlock();
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff)
-		return -ENOMEM;
-
-	buff[0] = '\0';
-	pos = 0;
-
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh, node,
 				 &bat_priv->softif_neigh_list, list) {
-		pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n",
+		seq_printf(seq, "%s %pM (vid: %d)\n",
 				bat_priv->softif_neigh == softif_neigh
 				? "=>" : "  ", softif_neigh->addr,
 				softif_neigh->vid);
 	}
 	rcu_read_unlock();

-	seq_printf(seq, "%s", buff);
-	kfree(buff);
 	return 0;
 }
...
...
@@ -266,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 			softif_neigh->addr, softif_neigh->vid);
 		softif_neigh_tmp = bat_priv->softif_neigh;
 		bat_priv->softif_neigh = softif_neigh;
-		kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+		softif_neigh_free_ref(softif_neigh_tmp);
 		/* we need to hold the additional reference */
 		goto err;
 	}
...
...
@@ -284,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 	}

out:
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	softif_neigh_free_ref(softif_neigh);
err:
 	kfree_skb(skb);
 	return;
...
...
@@ -437,7 +414,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 }

 void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, struct batman_if *recv_if,
+		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
...
...
@@ -485,7 +462,7 @@ void interface_rx(struct net_device *soft_iface,
 		memcpy(unicast_packet->dest,
 		       bat_priv->softif_neigh->addr, ETH_ALEN);

-		ret = route_unicast_packet(skb, recv_if, hdr_size);
+		ret = route_unicast_packet(skb, recv_if);
 		if (ret == NET_RX_DROP)
 			goto dropped;
...
...
@@ -645,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface)
 	unregister_netdevice(soft_iface);
 }

+int softif_is_valid(struct net_device *net_dev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+		return 1;
+#else
+	if (net_dev->hard_start_xmit == interface_tx)
+		return 1;
+#endif
+
+	return 0;
+}
+
 /* ethtool */
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
...
...
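The softif_neigh hunks above replace struct kref with a bare atomic_t: a reference may only be taken while the count is still non-zero (atomic_inc_not_zero() under rcu_read_lock()), and the final softif_neigh_free_ref() defers the kfree through call_rcu() so concurrent RCU readers stay safe. The core primitive, sketched in portable C11 (in the kernel of this era, atomic_inc_not_zero() is atomic_add_unless(v, 1, 0)):

#include <stdatomic.h>
#include <stdbool.h>

/* take a reference only if the object is not already being torn down */
static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* on failure the CAS reloads 'old' and we retry */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* count already hit zero: do not resurrect */
}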
net/batman-adv/soft-interface.h
View file @ b8cec4a4
...
...
@@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
 int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, struct batman_if *recv_if,
+		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
 struct net_device *softif_create(char *name);
 void softif_destroy(struct net_device *soft_iface);
+int softif_is_valid(struct net_device *net_dev);

 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
net/batman-adv/translation-table.c
View file @ b8cec4a4
...
...
@@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
 			    struct hna_global_entry *hna_global_entry,
 			    char *message);

+/* returns 1 if they are the same mac addr */
+static int compare_lhna(struct hlist_node *node, void *data2)
+{
+	void *data1 = container_of(node, struct hna_local_entry, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
+/* returns 1 if they are the same mac addr */
+static int compare_ghna(struct hlist_node *node, void *data2)
+{
+	void *data1 = container_of(node, struct hna_global_entry, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 static void hna_local_start_timer(struct bat_priv *bat_priv)
 {
 	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
 	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
 }

+static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
+						   void *data)
+{
+	struct hashtable_t *hash = bat_priv->hna_local_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
+		if (!compare_eth(hna_local_entry, data))
+			continue;
+
+		hna_local_entry_tmp = hna_local_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return hna_local_entry_tmp;
+}
+
+static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
+						     void *data)
+{
+	struct hashtable_t *hash = bat_priv->hna_global_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct hna_global_entry *hna_global_entry;
+	struct hna_global_entry *hna_global_entry_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
+		if (!compare_eth(hna_global_entry, data))
+			continue;
+
+		hna_global_entry_tmp = hna_global_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return hna_global_entry_tmp;
+}
+
 int hna_local_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->hna_local_hash)
...
...
@@ -60,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 	int required_bytes;

 	spin_lock_bh(&bat_priv->hna_lhash_lock);
-	hna_local_entry =
-		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
-						     compare_orig, choose_orig,
-						     addr));
+	hna_local_entry = hna_local_hash_find(bat_priv, addr);
 	spin_unlock_bh(&bat_priv->hna_lhash_lock);

 	if (hna_local_entry) {
...
...
@@ -99,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 	hna_local_entry->last_seen = jiffies;

 	/* the batman interface mac address should never be purged */
-	if (compare_orig(addr, soft_iface->dev_addr))
+	if (compare_eth(addr, soft_iface->dev_addr))
 		hna_local_entry->never_purge = 1;
 	else
 		hna_local_entry->never_purge = 0;

 	spin_lock_bh(&bat_priv->hna_lhash_lock);

-	hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
-		 hna_local_entry);
+	hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
+		 hna_local_entry, &hna_local_entry->hash_entry);
 	bat_priv->num_local_hna++;
 	atomic_set(&bat_priv->hna_local_changed, 1);
...
...
@@ -116,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 	/* remove address from global hash if present */
 	spin_lock_bh(&bat_priv->hna_ghash_lock);

-	hna_global_entry = ((struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash,
-					  compare_orig, choose_orig, addr));
+	hna_global_entry = hna_global_hash_find(bat_priv, addr);

 	if (hna_global_entry)
 		_hna_global_del_orig(bat_priv, hna_global_entry,
...
...
@@ -132,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
 {
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	struct element_t *bucket;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	int count = 0;
+	int i, count = 0;

 	spin_lock_bh(&bat_priv->hna_lhash_lock);

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_local_entry, node, head,
+					 hash_entry) {
 			if (buff_len < (count + 1) * ETH_ALEN)
 				break;

-			hna_local_entry = bucket->data;
 			memcpy(buff + (count * ETH_ALEN),
 			       hna_local_entry->addr, ETH_ALEN);

 			count++;
 		}
+		rcu_read_unlock();
 	}

 	/* if we did not get all new local hnas see you next time  ;-) */
...
...
@@ -170,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	size_t buf_size, pos;
 	char *buff;
+	int i;

 	if (!bat_priv->primary_if) {
 		return seq_printf(seq, "BATMAN mesh %s disabled - "
...
...
@@ -194,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each(walk, head)
+		rcu_read_lock();
+		__hlist_for_each_rcu(node, head)
 			buf_size += 21;
+		rcu_read_unlock();
 	}

 	buff = kmalloc(buf_size, GFP_ATOMIC);
...
...
@@ -203,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 		spin_unlock_bh(&bat_priv->hna_lhash_lock);
 		return -ENOMEM;
 	}
+
 	buff[0] = '\0';
 	pos = 0;

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			hna_local_entry = bucket->data;
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_local_entry, node,
+					 head, hash_entry) {
 			pos += snprintf(buff + pos, 22, " * %pM\n",
 					hna_local_entry->addr);
 		}
+		rcu_read_unlock();
 	}

 	spin_unlock_bh(&bat_priv->hna_lhash_lock);
...
...
@@ -224,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	return 0;
 }

-static void _hna_local_del(void *data, void *arg)
+static void _hna_local_del(struct hlist_node *node, void *arg)
 {
 	struct bat_priv *bat_priv = (struct bat_priv *)arg;
+	void *data = container_of(node, struct hna_local_entry, hash_entry);

 	kfree(data);
 	bat_priv->num_local_hna--;
...
...
@@ -240,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
 	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
 		hna_local_entry->addr, message);

-	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
+	hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
 		    hna_local_entry->addr);
-	_hna_local_del(hna_local_entry, bat_priv);
+	_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
 }

 void hna_local_remove(struct bat_priv *bat_priv,
...
...
@@ -252,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
 	spin_lock_bh(&bat_priv->hna_lhash_lock);

-	hna_local_entry = (struct hna_local_entry *)
-		hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
-			  addr);
+	hna_local_entry = hna_local_hash_find(bat_priv, addr);

 	if (hna_local_entry)
 		hna_local_del(bat_priv, hna_local_entry, message);
...
...
@@ -270,25 +339,27 @@ static void hna_local_purge(struct work_struct *work)
 		container_of(delayed_work, struct bat_priv, hna_work);
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	int i;
-	struct hlist_node *walk, *safe;
+	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	unsigned long timeout;
+	int i;

 	spin_lock_bh(&bat_priv->hna_lhash_lock);

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-			hna_local_entry = bucket->data;
+		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+					  head, hash_entry) {
+			if (hna_local_entry->never_purge)
+				continue;

 			timeout = hna_local_entry->last_seen;
 			timeout += LOCAL_HNA_TIMEOUT * HZ;

-			if ((!hna_local_entry->never_purge) &&
-			    time_after(jiffies, timeout))
+			if (time_before(jiffies, timeout))
+				continue;
+
 			hna_local_del(bat_priv, hna_local_entry,
 				      "address timed out");
 		}
...
...
@@ -334,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 		spin_lock_bh(&bat_priv->hna_ghash_lock);

 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, compare_orig,
-				  choose_orig, hna_ptr);
+		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);

 		if (!hna_global_entry) {
 			spin_unlock_bh(&bat_priv->hna_ghash_lock);
...
...
@@ -356,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 				 hna_global_entry->addr, orig_node->orig);

 			spin_lock_bh(&bat_priv->hna_ghash_lock);
-			hash_add(bat_priv->hna_global_hash, compare_orig,
-				 choose_orig, hna_global_entry);
+			hash_add(bat_priv->hna_global_hash, compare_ghna,
+				 choose_orig, hna_global_entry,
+				 &hna_global_entry->hash_entry);
 		}
...
...
@@ -368,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 		spin_lock_bh(&bat_priv->hna_lhash_lock);

 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_local_entry = (struct hna_local_entry *)
-			hash_find(bat_priv->hna_local_hash, compare_orig,
-				  choose_orig, hna_ptr);
+		hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);

 		if (hna_local_entry)
 			hna_local_del(bat_priv, hna_local_entry,
...
...
@@ -400,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->hna_global_hash;
 	struct hna_global_entry *hna_global_entry;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	size_t buf_size, pos;
 	char *buff;
+	int i;

 	if (!bat_priv->primary_if) {
 		return seq_printf(seq, "BATMAN mesh %s disabled - "
...
...
@@ -423,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each(walk, head)
+		rcu_read_lock();
+		__hlist_for_each_rcu(node, head)
 			buf_size += 43;
+		rcu_read_unlock();
 	}

 	buff = kmalloc(buf_size, GFP_ATOMIC);
...
...
@@ -438,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			hna_global_entry = bucket->data;
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_global_entry, node,
+					 head, hash_entry) {
 			pos += snprintf(buff + pos, 44,
 					" * %pM via %pM\n",
 					hna_global_entry->addr,
 					hna_global_entry->orig_node->orig);
 		}
+		rcu_read_unlock();
 	}

 	spin_unlock_bh(&bat_priv->hna_ghash_lock);
...
...
@@ -464,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
 		hna_global_entry->addr, hna_global_entry->orig_node->orig,
 		message);

-	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
+	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
 		    hna_global_entry->addr);
 	kfree(hna_global_entry);
 }
...
...
@@ -483,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
 		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);

-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, compare_orig,
-				  choose_orig, hna_ptr);
+		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);

 		if ((hna_global_entry) &&
 		    (hna_global_entry->orig_node == orig_node))
...
...
@@ -502,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 	orig_node->hna_buff = NULL;
 }

-static void hna_global_del(void *data, void *arg)
+static void hna_global_del(struct hlist_node *node, void *arg)
 {
+	void *data = container_of(node, struct hna_global_entry, hash_entry);
+
 	kfree(data);
 }
...
...
@@ -519,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 {
 	struct hna_global_entry *hna_global_entry;
+	struct orig_node *orig_node = NULL;

 	spin_lock_bh(&bat_priv->hna_ghash_lock);
-	hna_global_entry = (struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash,
-					  compare_orig, choose_orig, addr);
-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	hna_global_entry = hna_global_hash_find(bat_priv, addr);

 	if (!hna_global_entry)
-		return NULL;
+		goto out;

-	return hna_global_entry->orig_node;
+	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
+		goto out;
+
+	orig_node = hna_global_entry->orig_node;
+
+out:
+	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	return orig_node;
 }
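The translation-table changes condense the new hash API into one place: entries embed their own struct hlist_node (the hash_entry field) instead of being boxed in a separately allocated element_t, compare callbacks receive the node and recover the entry via container_of(), and lookups walk the bucket under rcu_read_lock(). A standalone sketch of that shape with a tiny singly-linked stand-in for hlist (all names here are illustrative, not the kernel's):

#include <stddef.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node_sketch {
	struct hlist_node_sketch *next;
};

struct hna_entry_sketch {
	unsigned char addr[6];
	struct hlist_node_sketch hash_entry;	/* embedded, no element_t box */
};

/* returns 1 if the entry behind 'node' carries MAC address 'data' */
static int compare_hna(struct hlist_node_sketch *node, void *data)
{
	struct hna_entry_sketch *e =
		container_of(node, struct hna_entry_sketch, hash_entry);

	return memcmp(e->addr, data, 6) == 0;
}

static struct hna_entry_sketch *bucket_find(struct hlist_node_sketch *head,
					    void *addr)
{
	for (struct hlist_node_sketch *n = head; n; n = n->next)
		if (compare_hna(n, addr))
			return container_of(n, struct hna_entry_sketch,
					    hash_entry);
	return NULL;
}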
net/batman-adv/types.h
View file @ b8cec4a4
...
...
@@ -33,7 +33,7 @@
 			  sizeof(struct bcast_packet))))

-struct batman_if {
+struct hard_iface {
 	struct list_head list;
 	int16_t if_num;
 	char if_status;
...
...
@@ -43,7 +43,7 @@ struct batman_if {
 	unsigned char *packet_buff;
 	int packet_len;
 	struct kobject *hardif_obj;
-	struct kref refcount;
+	atomic_t refcount;
 	struct packet_type batman_adv_ptype;
 	struct net_device *soft_iface;
 	struct rcu_head rcu;
...
...
@@ -70,8 +70,6 @@ struct orig_node {
 	struct neigh_node *router;
 	unsigned long *bcast_own;
 	uint8_t *bcast_own_sum;
-	uint8_t tq_own;
-	int tq_asym_penalty;
 	unsigned long last_valid;
 	unsigned long bcast_seqno_reset;
 	unsigned long batman_seqno_reset;
...
...
@@ -83,20 +81,28 @@ struct orig_node {
 	uint8_t last_ttl;
 	unsigned long bcast_bits[NUM_WORDS];
 	uint32_t last_bcast_seqno;
-	struct list_head neigh_list;
+	struct hlist_head neigh_list;
 	struct list_head frag_list;
+	spinlock_t neigh_list_lock; /* protects neighbor list */
+	atomic_t refcount;
+	struct rcu_head rcu;
+	struct hlist_node hash_entry;
+	struct bat_priv *bat_priv;
 	unsigned long last_frag_packet;
-	struct {
-		uint8_t candidates;
-		struct neigh_node *selected;
-	} bond;
+	spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
+				  * neigh_node->real_bits,
+				  * neigh_node->real_packet_count */
+	spinlock_t bcast_seqno_lock; /* protects bcast_bits,
+				      * last_bcast_seqno */
+	atomic_t bond_candidates;
+	struct list_head bond_list;
 };

 struct gw_node {
 	struct hlist_node list;
 	struct orig_node *orig_node;
 	unsigned long deleted;
-	struct kref refcount;
+	atomic_t refcount;
 	struct rcu_head rcu;
 };
...
...
@@ -105,18 +111,20 @@ struct gw_node {
  * @last_valid: when last packet via this neighbor was received
  */
 struct neigh_node {
-	struct list_head list;
+	struct hlist_node list;
 	uint8_t addr[ETH_ALEN];
 	uint8_t real_packet_count;
 	uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
 	uint8_t tq_index;
 	uint8_t tq_avg;
 	uint8_t last_ttl;
-	struct neigh_node *next_bond_candidate;
+	struct list_head bonding_list;
 	unsigned long last_valid;
 	unsigned long real_bits[NUM_WORDS];
+	atomic_t refcount;
+	struct rcu_head rcu;
 	struct orig_node *orig_node;
-	struct batman_if *if_incoming;
+	struct hard_iface *if_incoming;
 };
...
...
@@ -140,7 +148,7 @@ struct bat_priv {
 	struct hlist_head softif_neigh_list;
 	struct softif_neigh *softif_neigh;
 	struct debug_log *debug_log;
-	struct batman_if *primary_if;
+	struct hard_iface *primary_if;
 	struct kobject *mesh_obj;
 	struct dentry *debug_dir;
 	struct hlist_head forw_bat_list;
...
...
@@ -151,12 +159,11 @@ struct bat_priv {
 	struct hashtable_t *hna_local_hash;
 	struct hashtable_t *hna_global_hash;
 	struct hashtable_t *vis_hash;
-	spinlock_t orig_hash_lock; /* protects orig_hash */
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
 	spinlock_t hna_lhash_lock; /* protects hna_local_hash */
 	spinlock_t hna_ghash_lock; /* protects hna_global_hash */
-	spinlock_t gw_list_lock; /* protects gw_list */
+	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
 	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
...
...
@@ -165,7 +172,7 @@ struct bat_priv {
 	struct delayed_work hna_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
-	struct gw_node *curr_gw;
+	struct gw_node __rcu *curr_gw; /* rcu protected pointer */
 	struct vis_info *my_vis_info;
 };
...
...
@@ -188,11 +195,13 @@ struct hna_local_entry {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
+	struct hlist_node hash_entry;
 };

 struct hna_global_entry {
 	uint8_t addr[ETH_ALEN];
 	struct orig_node *orig_node;
+	struct hlist_node hash_entry;
 };

 /**
...
...
@@ -208,7 +217,7 @@ struct forw_packet {
 	uint32_t direct_link_flags;
 	uint8_t num_packets;
 	struct delayed_work delayed_work;
-	struct batman_if *if_incoming;
+	struct hard_iface *if_incoming;
 };

 /* While scanning for vis-entries of a particular vis-originator
...
...
@@ -242,6 +251,7 @@ struct vis_info {
 	 * from.  we should not reply to them. */
 	struct list_head send_list;
 	struct kref refcount;
+	struct hlist_node hash_entry;
 	struct bat_priv *bat_priv;
 	/* this packet might be part of the vis send queue. */
 	struct sk_buff *skb_packet;
...
...
@@ -264,7 +274,7 @@ struct softif_neigh {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	short vid;
-	struct kref refcount;
+	atomic_t refcount;
 	struct rcu_head rcu;
 };
...
...
net/batman-adv/unicast.c
View file @ b8cec4a4
...
...
@@ -183,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 		(struct unicast_frag_packet *)skb->data;
 	*new_skb = NULL;

-	spin_lock_bh(&bat_priv->orig_hash_lock);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-			       unicast_packet->orig));
-
-	if (!orig_node) {
-		pr_debug("couldn't find originator in orig_hash\n");
+	orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
+	if (!orig_node)
 		goto out;
-	}

 	orig_node->last_frag_packet = jiffies;
...
...
@@ -215,14 +210,15 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 	/* if not, merge failed */
 	if (*new_skb)
 		ret = NET_RX_SUCCESS;
-out:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);

+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return ret;
 }

 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-		  struct batman_if *batman_if, uint8_t dstaddr[])
+		  struct hard_iface *hard_iface, uint8_t dstaddr[])
 {
 	struct unicast_packet tmp_uc, *unicast_packet;
 	struct sk_buff *frag_skb;
...
...
@@ -267,12 +263,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 	frag1->flags = UNI_FRAG_HEAD | large_tail;
 	frag2->flags = large_tail;

-	seqno = atomic_add_return(2, &batman_if->frag_seqno);
+	seqno = atomic_add_return(2, &hard_iface->frag_seqno);
 	frag1->seqno = htons(seqno - 1);
 	frag2->seqno = htons(seqno);

-	send_skb_packet(skb, batman_if, dstaddr);
-	send_skb_packet(frag_skb, batman_if, dstaddr);
+	send_skb_packet(skb, hard_iface, dstaddr);
+	send_skb_packet(frag_skb, hard_iface, dstaddr);
 	return NET_RX_SUCCESS;

 drop_frag:
...
...
@@ -286,40 +282,37 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct unicast_packet *unicast_packet;
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
-	struct neigh_node *router;
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node;
 	int data_len = skb->len;
-	uint8_t dstaddr[6];
-
-	spin_lock_bh(&bat_priv->orig_hash_lock);
+	int ret = 1;

 	/* get routing information */
-	if (is_multicast_ether_addr(ethhdr->h_dest))
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
+		if (orig_node)
+			goto find_router;
+	}

-	/* check for hna host */
-	if (!orig_node)
-		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+	/* check for hna host - increases orig_node refcount */
+	orig_node = transtable_search(bat_priv, ethhdr->h_dest);

-	router = find_router(bat_priv, orig_node, NULL);
-
-	if (!router)
-		goto unlock;
-
-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
-
-	batman_if = router->if_incoming;
-	memcpy(dstaddr, router->addr, ETH_ALEN);
-
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-	if (batman_if->if_status != IF_ACTIVE)
-		goto dropped;
+find_router:
+	/**
+	 * find_router():
+	 *  - if orig_node is NULL it returns NULL
+	 *  - increases neigh_nodes refcount if found.
+	 */
+	neigh_node = find_router(bat_priv, orig_node, NULL);
+
+	if (!neigh_node)
+		goto out;
+
+	if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+		goto out;

 	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-		goto dropped;
+		goto out;

 	unicast_packet = (struct unicast_packet *)skb->data;
...
...
@@ -333,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->fragmentation) &&
 	    data_len + sizeof(struct unicast_packet) >
-	    batman_if->net_dev->mtu) {
+	    neigh_node->if_incoming->net_dev->mtu) {
 		/* send frag skb decreases ttl */
 		unicast_packet->ttl++;
-		return frag_send_skb(skb, bat_priv, batman_if, dstaddr);
+		ret = frag_send_skb(skb, bat_priv,
+				    neigh_node->if_incoming, neigh_node->addr);
+		goto out;
 	}

-	send_skb_packet(skb, batman_if, dstaddr);
-	return 0;
-
-unlock:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
-dropped:
-	return 1;
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = 0;
+	goto out;
+
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	if (ret == 1)
+		kfree_skb(skb);
+	return ret;
 }
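unicast_send_skb() now sizes the fragmentation decision against the chosen neighbor's interface (neigh_node->if_incoming->net_dev->mtu) instead of a copied batman_if pointer, and frag_send_skb() reserves both fragment sequence numbers in one atomic step so the pair stays consecutive even under concurrent senders. A small C11 sketch of that reservation, mirroring atomic_add_return(2, &hard_iface->frag_seqno) above; the function name here is an illustrative assumption.

#include <stdatomic.h>
#include <stdint.h>

static void reserve_frag_seqnos(atomic_uint *frag_seqno,
				uint16_t *seq1, uint16_t *seq2)
{
	/* fetch_add returns the old value; +2 gives add_return semantics */
	unsigned int seqno = atomic_fetch_add(frag_seqno, 2) + 2;

	*seq1 = (uint16_t)(seqno - 1);	/* head fragment */
	*seq2 = (uint16_t)seqno;	/* tail fragment */
}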
net/batman-adv/unicast.h
View file @ b8cec4a4
...
...
@@ -32,7 +32,7 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 void frag_list_free(struct list_head *head);
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-		  struct batman_if *batman_if, uint8_t dstaddr[]);
+		  struct hard_iface *hard_iface, uint8_t dstaddr[]);

 static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
 {
...
...
net/batman-adv/vis.c
View file @ b8cec4a4
...
...
@@ -68,15 +68,16 @@ static void free_info(struct kref *ref)
 }

 /* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(void *data1, void *data2)
+static int vis_info_cmp(struct hlist_node *node, void *data2)
 {
 	struct vis_info *d1, *d2;
 	struct vis_packet *p1, *p2;
-	d1 = data1;
+
+	d1 = container_of(node, struct vis_info, hash_entry);
 	d2 = data2;
 	p1 = (struct vis_packet *)d1->skb_packet->data;
 	p2 = (struct vis_packet *)d2->skb_packet->data;
-	return compare_orig(p1->vis_orig, p2->vis_orig);
+	return compare_eth(p1->vis_orig, p2->vis_orig);
 }

 /* hash function to choose an entry in a hash table of given size */
...
...
@@ -104,6 +105,34 @@ static int vis_info_choose(void *data, int size)
 	return hash % size;
 }

+static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
+				      void *data)
+{
+	struct hashtable_t *hash = bat_priv->vis_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct vis_info *vis_info, *vis_info_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = vis_info_choose(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
+		if (!vis_info_cmp(node, data))
+			continue;
+
+		vis_info_tmp = vis_info;
+		break;
+	}
+	rcu_read_unlock();
+
+	return vis_info_tmp;
+}
+
 /* insert interface to the list of interfaces of one originator, if it
  * does not already exist in the list */
 static void vis_data_insert_interface(const uint8_t *interface,
...
...
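Note that vis_info keeps its struct kref (free_info() and the kref_put() calls further down) even as neighbors, gateways and interfaces move to atomic_t plus call_rcu() in this merge; its lifetime stays tied to the vis hash and send list rather than to RCU readers. For contrast, a minimal kref lookalike in C11, illustrative only and not the kernel's kref:

#include <stdatomic.h>

struct kref_sketch {
	atomic_int refcount;
};

static void kref_sketch_init(struct kref_sketch *k)
{
	atomic_store(&k->refcount, 1);
}

static void kref_sketch_get(struct kref_sketch *k)
{
	atomic_fetch_add(&k->refcount, 1);
}

/* returns 1 and calls release() when the last reference is dropped */
static int kref_sketch_put(struct kref_sketch *k,
			   void (*release)(struct kref_sketch *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1) {
		release(k);
		return 1;
	}
	return 0;
}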
@@ -114,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
struct
hlist_node
*
pos
;
hlist_for_each_entry
(
entry
,
pos
,
if_list
,
list
)
{
if
(
compare_
orig
(
entry
->
addr
,
(
void
*
)
interface
))
if
(
compare_
eth
(
entry
->
addr
,
(
void
*
)
interface
))
return
;
}
...
...
@@ -166,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
if
(
primary
&&
entry
->
quality
==
0
)
return
sprintf
(
buff
,
"HNA %pM, "
,
entry
->
dest
);
else
if
(
compare_
orig
(
entry
->
src
,
src
))
else
if
(
compare_
eth
(
entry
->
src
,
src
))
return
sprintf
(
buff
,
"TQ %pM %d, "
,
entry
->
dest
,
entry
->
quality
);
...
...
@@ -175,9 +204,8 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
int
vis_seq_print_text
(
struct
seq_file
*
seq
,
void
*
offset
)
{
struct
hlist_node
*
walk
;
struct
hlist_node
*
node
;
struct
hlist_head
*
head
;
struct
element_t
*
bucket
;
struct
vis_info
*
info
;
struct
vis_packet
*
packet
;
struct
vis_info_entry
*
entries
;
...
...
@@ -203,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
info
=
bucket
->
data
;
rcu_read_lock
();
hlist_for_each_entry_rcu
(
info
,
node
,
head
,
hash_entry
)
{
packet
=
(
struct
vis_packet
*
)
info
->
skb_packet
->
data
;
entries
=
(
struct
vis_info_entry
*
)
((
char
*
)
packet
+
sizeof
(
struct
vis_packet
));
...
...
@@ -213,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
if
(
entries
[
j
].
quality
==
0
)
continue
;
compare
=
compare_
orig
(
entries
[
j
].
src
,
packet
->
vis_orig
);
compare_
eth
(
entries
[
j
].
src
,
packet
->
vis_orig
);
vis_data_insert_interface
(
entries
[
j
].
src
,
&
vis_if_list
,
compare
);
...
...
@@ -223,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buf_size
+=
18
+
26
*
packet
->
entries
;
/* add primary/secondary records */
if
(
compare_
orig
(
entry
->
addr
,
packet
->
vis_orig
))
if
(
compare_
eth
(
entry
->
addr
,
packet
->
vis_orig
))
buf_size
+=
vis_data_count_prim_sec
(
&
vis_if_list
);
...
...
@@ -236,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
kfree
(
entry
);
}
}
rcu_read_unlock
();
}
buff
=
kmalloc
(
buf_size
,
GFP_ATOMIC
);
...
...
@@ -249,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
info
=
bucket
->
data
;
rcu_read_lock
();
hlist_for_each_entry_rcu
(
info
,
node
,
head
,
hash_entry
)
{
packet
=
(
struct
vis_packet
*
)
info
->
skb_packet
->
data
;
entries
=
(
struct
vis_info_entry
*
)
((
char
*
)
packet
+
sizeof
(
struct
vis_packet
));
...
...
@@ -259,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
if
(
entries
[
j
].
quality
==
0
)
continue
;
compare
=
compare_
orig
(
entries
[
j
].
src
,
packet
->
vis_orig
);
compare_
eth
(
entries
[
j
].
src
,
packet
->
vis_orig
);
vis_data_insert_interface
(
entries
[
j
].
src
,
&
vis_if_list
,
compare
);
...
...
@@ -277,7 +306,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
entry
->
primary
);
/* add primary/secondary records */
if
(
compare_
orig
(
entry
->
addr
,
packet
->
vis_orig
))
if
(
compare_
eth
(
entry
->
addr
,
packet
->
vis_orig
))
buff_pos
+=
vis_data_read_prim_sec
(
buff
+
buff_pos
,
&
vis_if_list
);
...
...
@@ -291,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
kfree
(
entry
);
}
}
rcu_read_unlock
();
}
spin_unlock_bh
(
&
bat_priv
->
vis_hash_lock
);
...
...
@@ -345,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
spin_lock_bh
(
&
bat_priv
->
vis_list_lock
);
list_for_each_entry
(
entry
,
recv_list
,
list
)
{
if
(
memcmp
(
entry
->
mac
,
mac
,
ETH_ALEN
)
==
0
)
{
if
(
compare_eth
(
entry
->
mac
,
mac
)
)
{
spin_unlock_bh
(
&
bat_priv
->
vis_list_lock
);
return
1
;
}
...
...
@@ -381,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
sizeof
(
struct
vis_packet
));
memcpy
(
search_packet
->
vis_orig
,
vis_packet
->
vis_orig
,
ETH_ALEN
);
old_info
=
hash_find
(
bat_priv
->
vis_hash
,
vis_info_cmp
,
vis_info_choose
,
&
search_elem
);
old_info
=
vis_hash_find
(
bat_priv
,
&
search_elem
);
kfree_skb
(
search_elem
.
skb_packet
);
if
(
old_info
)
{
...
...
@@ -442,7 +471,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
/* try to add it */
hash_added
=
hash_add
(
bat_priv
->
vis_hash
,
vis_info_cmp
,
vis_info_choose
,
info
);
info
,
&
info
->
hash_entry
);
if
(
hash_added
<
0
)
{
/* did not work (for some reason) */
kref_put
(
&
info
->
refcount
,
free_info
);
...
...
@@ -529,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
struct
vis_info
*
info
)
{
struct
hashtable_t
*
hash
=
bat_priv
->
orig_hash
;
struct
hlist_node
*
walk
;
struct
hlist_node
*
node
;
struct
hlist_head
*
head
;
struct
element_t
*
bucket
;
struct
orig_node
*
orig_node
;
struct
vis_packet
*
packet
;
int
best_tq
=
-
1
,
i
;
...
...
@@ -541,8 +569,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
orig_node
=
bucket
->
data
;
rcu_read_lock
();
hlist_for_each_entry_rcu
(
orig_node
,
node
,
head
,
hash_entry
)
{
if
((
orig_node
)
&&
(
orig_node
->
router
)
&&
(
orig_node
->
flags
&
VIS_SERVER
)
&&
(
orig_node
->
router
->
tq_avg
>
best_tq
))
{
...
...
@@ -551,6 +579,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
ETH_ALEN
);
}
}
rcu_read_unlock
();
}
return
best_tq
;
...
...
@@ -573,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info)
static
int
generate_vis_packet
(
struct
bat_priv
*
bat_priv
)
{
struct
hashtable_t
*
hash
=
bat_priv
->
orig_hash
;
struct
hlist_node
*
walk
;
struct
hlist_node
*
node
;
struct
hlist_head
*
head
;
struct
element_t
*
bucket
;
struct
orig_node
*
orig_node
;
struct
neigh_node
*
neigh_node
;
struct
vis_info
*
info
=
(
struct
vis_info
*
)
bat_priv
->
my_vis_info
;
...
...
@@ -587,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
info
->
first_seen
=
jiffies
;
packet
->
vis_type
=
atomic_read
(
&
bat_priv
->
vis_mode
);
spin_lock_bh
(
&
bat_priv
->
orig_hash_lock
);
memcpy
(
packet
->
target_orig
,
broadcast_addr
,
ETH_ALEN
);
packet
->
ttl
=
TTL
;
packet
->
seqno
=
htonl
(
ntohl
(
packet
->
seqno
)
+
1
);
...
...
@@ -597,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
if
(
packet
->
vis_type
==
VIS_TYPE_CLIENT_UPDATE
)
{
best_tq
=
find_best_vis_server
(
bat_priv
,
info
);
if
(
best_tq
<
0
)
{
spin_unlock_bh
(
&
bat_priv
->
orig_hash_lock
);
if
(
best_tq
<
0
)
return
-
1
;
}
}
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
orig_node
=
bucket
->
data
;
rcu_read_lock
();
hlist_for_each_entry_rcu
(
orig_node
,
node
,
head
,
hash_entry
)
{
neigh_node
=
orig_node
->
router
;
if
(
!
neigh_node
)
continue
;
if
(
!
compare_
orig
(
neigh_node
->
addr
,
orig_node
->
orig
))
if
(
!
compare_
eth
(
neigh_node
->
addr
,
orig_node
->
orig
))
continue
;
if
(
neigh_node
->
if_incoming
->
if_status
!=
IF_ACTIVE
)
...
...
@@ -632,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
entry
->
quality
=
neigh_node
->
tq_avg
;
packet
->
entries
++
;
if
(
vis_packet_full
(
info
))
{
spin_unlock_bh
(
&
bat_priv
->
orig_hash_lock
);
return
0
;
}
if
(
vis_packet_full
(
info
))
goto
unlock
;
}
rcu_read_unlock
();
}
spin_unlock_bh
(
&
bat_priv
->
orig_hash_lock
);
hash
=
bat_priv
->
hna_local_hash
;
spin_lock_bh
(
&
bat_priv
->
hna_lhash_lock
);
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry
(
bucket
,
walk
,
head
,
hlist
)
{
hna_local_entry
=
bucket
->
data
;
hlist_for_each_entry
(
hna_local_entry
,
node
,
head
,
hash_entry
)
{
entry
=
(
struct
vis_info_entry
*
)
skb_put
(
info
->
skb_packet
,
sizeof
(
*
entry
));
...
...
@@ -666,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
spin_unlock_bh
(
&
bat_priv
->
hna_lhash_lock
);
return
0
;
unlock:
rcu_read_unlock
();
return
0
;
}
/* free old vis packets. Must be called with this vis_hash_lock
...
...
@@ -674,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
{
int
i
;
struct
hashtable_t
*
hash
=
bat_priv
->
vis_hash
;
struct
hlist_node
*
walk
,
*
safe
;
struct
hlist_node
*
node
,
*
node_tmp
;
struct
hlist_head
*
head
;
struct
element_t
*
bucket
;
struct
vis_info
*
info
;
for
(
i
=
0
;
i
<
hash
->
size
;
i
++
)
{
head
=
&
hash
->
table
[
i
];
hlist_for_each_entry_safe
(
bucket
,
walk
,
safe
,
head
,
hlist
)
{
info
=
bucket
->
data
;
hlist_for_each_entry_safe
(
info
,
node
,
node_tmp
,
head
,
hash_entry
)
{
/* never purge own data. */
if
(
info
==
bat_priv
->
my_vis_info
)
continue
;
if
(
time_after
(
jiffies
,
info
->
first_seen
+
VIS_TIMEOUT
*
HZ
))
{
hlist_del
(
walk
);
kfree
(
bucket
);
hlist_del
(
node
);
send_list_del
(
info
);
kref_put
(
&
info
->
refcount
,
free_info
);
}
...
...
@@ -704,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
 				 struct vis_info *info)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	struct orig_node *orig_node;
 	struct vis_packet *packet;
 	struct sk_buff *skb;
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	uint8_t dstaddr[ETH_ALEN];
 	int i;

-	spin_lock_bh(&bat_priv->orig_hash_lock);
 	packet = (struct vis_packet *)info->skb_packet->data;

 	/* send to all routers in range. */
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			orig_node = bucket->data;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 			/* if it's a vis server and reachable, send it. */
 			if ((!orig_node) || (!orig_node->router))
 				continue;
...
@@ -737,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
 				continue;

 			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-			batman_if = orig_node->router->if_incoming;
+			hard_iface = orig_node->router->if_incoming;
 			memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-			spin_unlock_bh(&bat_priv->orig_hash_lock);

 			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
 			if (skb)
-				send_skb_packet(skb, batman_if, dstaddr);
+				send_skb_packet(skb, hard_iface, dstaddr);

-			spin_lock_bh(&bat_priv->orig_hash_lock);
 		}
+		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->orig_hash_lock);
 }
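Each recipient gets its own skb_clone() because the transmit path consumes the buffer it is handed; the pristine copy in info->skb_packet survives for the next destination. A condensed sketch of the fan-out (kernel context; for_each_router() is a placeholder for the hash walk above):

/* One clone per recipient: a clone shares the payload with the
 * original but owns its own header state, so handing it to the
 * transmit path does not disturb info->skb_packet. */
for_each_router(orig_node) {			/* placeholder iterator */
	struct sk_buff *skb;

	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (!skb)
		continue;			/* allocation may fail  */

	send_skb_packet(skb, orig_node->router->if_incoming,
			orig_node->router->addr);
}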
 static void unicast_vis_packet(struct bat_priv *bat_priv,
 			       struct vis_info *info)
 {
 	struct orig_node *orig_node;
+	struct neigh_node *neigh_node = NULL;
 	struct sk_buff *skb;
 	struct vis_packet *packet;
-	struct batman_if *batman_if;
-	uint8_t dstaddr[ETH_ALEN];

-	spin_lock_bh(&bat_priv->orig_hash_lock);
 	packet = (struct vis_packet *)info->skb_packet->data;
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   compare_orig, choose_orig,
-						   packet->target_orig));

-	if ((!orig_node) || (!orig_node->router))
-		goto out;
+	rcu_read_lock();
+	orig_node = orig_hash_find(bat_priv, packet->target_orig);

-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
-	batman_if = orig_node->router->if_incoming;
-	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	if (!orig_node)
+		goto unlock;
+
+	neigh_node = orig_node->router;
+
+	if (!neigh_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+		neigh_node = NULL;
+		goto unlock;
+	}
+
+	rcu_read_unlock();

 	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
 	if (skb)
-		send_skb_packet(skb, batman_if, dstaddr);
+		send_skb_packet(skb, neigh_node->if_incoming,
+				neigh_node->addr);

-	return;
+	goto out;

+unlock:
+	rcu_read_unlock();
 out:
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	return;
 }
 /* only send one vis packet. called from send_vis_packets() */
...
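The rewritten unicast_vis_packet() above is a textbook lookup-and-hold sequence: find the router under rcu_read_lock(), then take a reference with atomic_inc_not_zero(), which succeeds only if the count has not already hit zero, so a dying object is never resurrected. A condensed sketch (kernel context, not standalone; lookup(), use() and obj_free_ref() are placeholders for orig_hash_find(), the send path and neigh_node_free_ref()):

rcu_read_lock();
obj = lookup(key);			/* RCU-protected lookup */
if (!obj || !atomic_inc_not_zero(&obj->refcount)) {
	/* Object gone, or the last reference was already dropped:
	 * refuse to resurrect it. */
	rcu_read_unlock();
	return;
}
rcu_read_unlock();

use(obj);				/* safe: we hold our own reference */
obj_free_ref(obj);			/* drop it when done */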
@@ -896,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv)
 	INIT_LIST_HEAD(&bat_priv->vis_send_list);

 	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-			      bat_priv->my_vis_info);
+			      bat_priv->my_vis_info,
+			      &bat_priv->my_vis_info->hash_entry);
 	if (hash_added < 0) {
 		pr_err("Can't add own vis packet into hash\n");
 		/* not in hash, need to remove it manually. */
...
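The extra hash_add() argument is the hlist_node now embedded in vis_info itself: the table links the caller's node instead of allocating a wrapper, so insertion cannot fail for lack of memory. A reduced sketch of the idea (hypothetical simplified signature; the real hash_add() also takes compare/choose callbacks and rejects duplicates):

/* Kernel-context sketch: an intrusive hash bucket insert. */
struct vis_info_sketch {
	struct hlist_node hash_entry;	/* lives inside the object now */
	/* ... payload ... */
};

static int hash_add_sketch(struct hlist_head *bucket,
			   struct hlist_node *data_node)
{
	hlist_add_head(data_node, bucket);	/* just link, no kmalloc() */
	return 0;
}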
@@ -918,10 +945,11 @@ int vis_init(struct bat_priv *bat_priv)
 }

 /* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data, void *arg)
+static void free_info_ref(struct hlist_node *node, void *arg)
 {
-	struct vis_info *info = data;
+	struct vis_info *info;

+	info = container_of(node, struct vis_info, hash_entry);
 	send_list_del(info);
 	kref_put(&info->refcount, free_info);
 }
...
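free_info_ref() now receives the embedded hlist_node and recovers the enclosing vis_info with container_of(), which is plain pointer arithmetic over offsetof(). A standalone, runnable userspace re-creation (struct shortened for the demo):

#include <stddef.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's container_of(): recover the
 * address of the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node { struct hlist_node *next, **pprev; };

struct vis_info_demo {
	int seqno;
	struct hlist_node hash_entry;
};

int main(void)
{
	struct vis_info_demo info = { .seqno = 7 };
	struct hlist_node *node = &info.hash_entry;
	struct vis_info_demo *back =
		container_of(node, struct vis_info_demo, hash_entry);

	printf("seqno via container_of: %d\n", back->seqno); /* prints 7 */
	return 0;
}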