Commit d7a20c86 authored by David S. Miller

Merge tag 'batman-adv-for-davem' of git://git.open-mesh.org/linux-merge

Included changes:
- ensure RecordRoute information is added to BAT_ICMP echo_request/reply only
- use VLAN_ETH_HLEN when possible
- use htons when possible
- replace the old fragmentation code with a new, improved implementation by
  Martin Hundebøll
- create a common header for BAT_ICMP packets to improve extensibility
- take the network coding overhead into account when computing the overall
  room needed by the batman-adv headers
- add dummy soft-interface rx mode handler
- minor code refactoring and cleanups
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ccdbb6e9 a4deee1a
@@ -24,6 +24,7 @@ batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
@@ -37,4 +38,3 @@ batman-adv-y += send.o
batman-adv-y += soft-interface.o
batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
-batman-adv-y += unicast.o
@@ -863,25 +863,25 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
struct arphdr *arphdr;
uint8_t *hw_src, *hw_dst;
struct batadv_bla_claim_dst *bla_dst;
-uint16_t proto;
+__be16 proto;
int headlen;
unsigned short vid = BATADV_NO_FLAGS;
int ret;
ethhdr = eth_hdr(skb);
-if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
vhdr = (struct vlan_ethhdr *)ethhdr;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
vid |= BATADV_VLAN_HAS_TAG;
-proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+proto = vhdr->h_vlan_encapsulated_proto;
headlen = sizeof(*vhdr);
} else {
-proto = ntohs(ethhdr->h_proto);
+proto = ethhdr->h_proto;
headlen = ETH_HLEN;
}
-if (proto != ETH_P_ARP)
+if (proto != htons(ETH_P_ARP))
return 0; /* not a claim frame */
/* this must be a ARP frame. check if it is a claim. */
@@ -1379,8 +1379,8 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
-if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
+if (!pskb_may_pull(skb, hdr_size + VLAN_ETH_HLEN))
return 0;
vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
...
@@ -29,7 +29,6 @@
#include "send.h"
#include "types.h"
#include "translation-table.h"
-#include "unicast.h"
static void batadv_dat_purge(struct work_struct *work);
@@ -592,9 +591,9 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
goto free_orig;
tmp_skb = pskb_copy(skb, GFP_ATOMIC);
-if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
+if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
cand[i].orig_node,
packet_subtype)) {
kfree_skb(tmp_skb);
goto free_neigh;
}
@@ -990,10 +989,10 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
* that a node not using the 4addr packet format doesn't support it.
*/
if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
-err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
+err = batadv_send_skb_unicast_4addr(bat_priv, skb_new,
BATADV_P_DAT_CACHE_REPLY);
else
-err = batadv_unicast_send_skb(bat_priv, skb_new);
+err = batadv_send_skb_unicast(bat_priv, skb_new);
if (!err) {
batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
...
/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "main.h"
#include "fragmentation.h"
#include "send.h"
#include "originator.h"
#include "routing.h"
#include "hard-interface.h"
#include "soft-interface.h"
/**
* batadv_frag_clear_chain - delete entries in the fragment buffer chain
* @head: head of chain with entries.
*
* Free fragments in the passed hlist. Should be called with appropriate lock.
*/
static void batadv_frag_clear_chain(struct hlist_head *head)
{
struct batadv_frag_list_entry *entry;
struct hlist_node *node;
hlist_for_each_entry_safe(entry, node, head, list) {
hlist_del(&entry->list);
kfree_skb(entry->skb);
kfree(entry);
}
}
/**
* batadv_frag_purge_orig - free fragments associated to an orig
* @orig_node: originator to free fragments from
* @check_cb: optional function to tell if an entry should be purged
*/
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
bool (*check_cb)(struct batadv_frag_table_entry *))
{
struct batadv_frag_table_entry *chain;
uint8_t i;
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
chain = &orig_node->fragments[i];
spin_lock_bh(&orig_node->fragments[i].lock);
if (!check_cb || check_cb(chain)) {
batadv_frag_clear_chain(&orig_node->fragments[i].head);
orig_node->fragments[i].size = 0;
}
spin_unlock_bh(&orig_node->fragments[i].lock);
}
}
/**
* batadv_frag_size_limit - maximum possible size of packet to be fragmented
*
* Returns the maximum size of payload that can be fragmented.
*/
static int batadv_frag_size_limit(void)
{
int limit = BATADV_FRAG_MAX_FRAG_SIZE;
limit -= sizeof(struct batadv_frag_packet);
limit *= BATADV_FRAG_MAX_FRAGMENTS;
return limit;
}
/**
* batadv_frag_init_chain - check and prepare fragment chain for new fragment
* @chain: chain in fragments table to init
* @seqno: sequence number of the received fragment
*
* Make chain ready for a fragment with sequence number "seqno". Delete existing
* entries if they have an "old" sequence number.
*
* Caller must hold chain->lock.
*
* Returns true if chain is empty and caller can just insert the new fragment
* without searching for the right position.
*/
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
uint16_t seqno)
{
if (chain->seqno == seqno)
return false;
if (!hlist_empty(&chain->head))
batadv_frag_clear_chain(&chain->head);
chain->size = 0;
chain->seqno = seqno;
return true;
}
/**
* batadv_frag_insert_packet - insert a fragment into a fragment chain
* @orig_node: originator that the fragment was received from
* @skb: skb to insert
* @chain_out: list head to attach complete chains of fragments to
*
* Insert a new fragment into the reverse ordered chain in the right table
* entry. The hash table entry is cleared if "old" fragments exist in it.
*
* Returns true if skb is buffered, false on error. If the chain has all the
* fragments needed to merge the packet, the chain is moved to the passed head
* to avoid locking the chain in the table.
*/
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
struct sk_buff *skb,
struct hlist_head *chain_out)
{
struct batadv_frag_table_entry *chain;
struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
struct batadv_frag_packet *frag_packet;
uint8_t bucket;
uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
bool ret = false;
/* Linearize packet to avoid linearizing 16 packets in a row when doing
* the later merge. Non-linear merge should be added to remove this
* linearization.
*/
if (skb_linearize(skb) < 0)
goto err;
frag_packet = (struct batadv_frag_packet *)skb->data;
seqno = ntohs(frag_packet->seqno);
bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
if (!frag_entry_new)
goto err;
frag_entry_new->skb = skb;
frag_entry_new->no = frag_packet->no;
/* Select entry in the "chain table" and delete any prior fragments
* with another sequence number. batadv_frag_init_chain() returns true,
* if the list is empty at return.
*/
chain = &orig_node->fragments[bucket];
spin_lock_bh(&chain->lock);
if (batadv_frag_init_chain(chain, seqno)) {
hlist_add_head(&frag_entry_new->list, &chain->head);
chain->size = skb->len - hdr_size;
chain->timestamp = jiffies;
ret = true;
goto out;
}
/* Find the position for the new fragment. */
hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
/* Drop packet if fragment already exists. */
if (frag_entry_curr->no == frag_entry_new->no)
goto err_unlock;
/* Order fragments from highest to lowest. */
if (frag_entry_curr->no < frag_entry_new->no) {
hlist_add_before(&frag_entry_new->list,
&frag_entry_curr->list);
chain->size += skb->len - hdr_size;
chain->timestamp = jiffies;
ret = true;
goto out;
}
}
/* Reached the end of the list, so insert after 'frag_entry_curr'. */
if (likely(frag_entry_curr)) {
hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
chain->size += skb->len - hdr_size;
chain->timestamp = jiffies;
ret = true;
}
out:
if (chain->size > batadv_frag_size_limit() ||
ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
/* Clear chain if total size of either the list or the packet
* exceeds the maximum size of one merged packet.
*/
batadv_frag_clear_chain(&chain->head);
chain->size = 0;
} else if (ntohs(frag_packet->total_size) == chain->size) {
/* All fragments received. Hand over chain to caller. */
hlist_move_list(&chain->head, chain_out);
chain->size = 0;
}
err_unlock:
spin_unlock_bh(&chain->lock);
err:
if (!ret)
kfree(frag_entry_new);
return ret;
}
/**
* batadv_frag_merge_packets - merge a chain of fragments
* @chain: head of chain with fragments
* @skb: packet with total size of skb after merging
*
* Expand the first skb in the chain and copy the content of the remaining
* skb's into the expanded one. After doing so, clear the chain.
*
* Returns the merged skb or NULL on error.
*/
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out = NULL;
int size, hdr_size = sizeof(struct batadv_frag_packet);
/* Make sure incoming skb has non-bogus data. */
packet = (struct batadv_frag_packet *)skb->data;
size = ntohs(packet->total_size);
if (size > batadv_frag_size_limit())
goto free;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
hlist_del(&entry->list);
skb_out = entry->skb;
kfree(entry);
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
}
/* Move the existing MAC header to just before the payload. (Override
* the fragment header.)
*/
skb_pull_rcsum(skb_out, hdr_size);
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
skb_reset_transport_header(skb_out);
/* Copy the payload of each remaining fragment into the merged skb */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
size);
}
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain);
return skb_out;
}
/**
* batadv_frag_skb_buffer - buffer fragment for later merge
* @skb: skb to buffer
* @orig_node_src: originator that the skb is received from
*
* Add fragment to buffer and merge fragments if possible.
*
* There are three possible outcomes: 1) Packet is merged: Return true and
* set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
* to NULL; 3) Error: Return false and leave skb as is.
*/
bool batadv_frag_skb_buffer(struct sk_buff **skb,
struct batadv_orig_node *orig_node_src)
{
struct sk_buff *skb_out = NULL;
struct hlist_head head = HLIST_HEAD_INIT;
bool ret = false;
/* Add packet to buffer and table entry if merge is possible. */
if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
goto out_err;
/* Leave if more fragments are needed to merge. */
if (hlist_empty(&head))
goto out;
skb_out = batadv_frag_merge_packets(&head, *skb);
if (!skb_out)
goto out_err;
out:
*skb = skb_out;
ret = true;
out_err:
return ret;
}
/**
* batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
* @skb: skb to forward
* @recv_if: interface that the skb is received on
* @orig_node_src: originator that the skb is received from
*
 * Look up the next hop of the fragment's payload and check if the merged packet
 * will exceed the MTU towards that next hop. If so, the fragment is forwarded
* without merging it.
*
* Returns true if the fragment is consumed/forwarded, false otherwise.
*/
bool batadv_frag_skb_fwd(struct sk_buff *skb,
struct batadv_hard_iface *recv_if,
struct batadv_orig_node *orig_node_src)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node_dst = NULL;
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_frag_packet *packet;
uint16_t total_size;
bool ret = false;
packet = (struct batadv_frag_packet *)skb->data;
orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
if (!orig_node_dst)
goto out;
neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
if (!neigh_node)
goto out;
/* Forward the fragment, if the merged packet would be too big to
* be assembled.
*/
total_size = ntohs(packet->total_size);
if (total_size > neigh_node->if_incoming->net_dev->mtu) {
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
skb->len + ETH_HLEN);
packet->header.ttl--;
batadv_send_skb_packet(skb, neigh_node->if_incoming,
neigh_node->addr);
ret = true;
}
out:
if (orig_node_dst)
batadv_orig_node_free_ref(orig_node_dst);
if (neigh_node)
batadv_neigh_node_free_ref(neigh_node);
return ret;
}
/**
* batadv_frag_create - create a fragment from skb
* @skb: skb to create fragment from
* @frag_head: header to use in new fragment
* @mtu: size of new fragment
*
* Split the passed skb into two fragments: A new one with size matching the
* passed mtu and the old one with the rest. The new skb contains data from the
* tail of the old skb.
*
* Returns the new fragment, NULL on error.
*/
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
struct batadv_frag_packet *frag_head,
unsigned int mtu)
{
struct sk_buff *skb_fragment;
unsigned header_size = sizeof(*frag_head);
unsigned fragment_size = mtu - header_size;
skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
if (!skb_fragment)
goto err;
skb->priority = TC_PRIO_CONTROL;
/* Eat the last mtu-bytes of the skb */
skb_reserve(skb_fragment, header_size + ETH_HLEN);
skb_split(skb, skb_fragment, skb->len - fragment_size);
/* Add the header */
skb_push(skb_fragment, header_size);
memcpy(skb_fragment->data, frag_head, header_size);
err:
return skb_fragment;
}
/**
* batadv_frag_send_packet - create up to 16 fragments from the passed skb
* @skb: skb to create fragments from
* @orig_node: final destination of the created fragments
* @neigh_node: next-hop of the created fragments
*
* Returns true on success, false otherwise.
*/
bool batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node)
{
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if;
struct batadv_frag_packet frag_header;
struct sk_buff *skb_fragment;
unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
unsigned header_size = sizeof(frag_header);
unsigned max_fragment_size, max_packet_size;
/* To avoid merge and refragmentation at next-hops we never send
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
*/
mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
max_fragment_size = (mtu - header_size - ETH_HLEN);
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
/* Don't even try to fragment, if we need more than 16 fragments */
if (skb->len > max_packet_size)
goto out_err;
bat_priv = orig_node->bat_priv;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out_err;
/* Create one header to be copied to all fragments */
frag_header.header.packet_type = BATADV_UNICAST_FRAG;
frag_header.header.version = BATADV_COMPAT_VERSION;
frag_header.header.ttl = BATADV_TTL;
frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
frag_header.reserved = 0;
frag_header.no = 0;
frag_header.total_size = htons(skb->len);
memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
/* Eat and send fragments from the tail of skb */
while (skb->len > max_fragment_size) {
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment)
goto out_err;
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb_fragment->len + ETH_HLEN);
batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
neigh_node->addr);
frag_header.no++;
/* The initial check in this function should cover this case */
if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
goto out_err;
}
/* Make room for the fragment header. */
if (batadv_skb_head_push(skb, header_size) < 0 ||
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
goto out_err;
memcpy(skb->data, &frag_header, header_size);
/* Send the last fragment */
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb->len + ETH_HLEN);
batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
return true;
out_err:
return false;
}
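The arithmetic behind batadv_frag_size_limit() and the tail-eating loop above is easy to check by hand. The standalone sketch below (plain userspace C, not kernel code) redoes it with the BATADV_FRAG_MAX_FRAG_SIZE (1400) and BATADV_FRAG_MAX_FRAGMENTS (16) values defined in main.h further down, plus an assumed 20-byte struct batadv_frag_packet; the real transmit path also reserves room for the Ethernet header, so the usable per-fragment payload is slightly smaller in practice.

#include <stdio.h>

int main(void)
{
	const int max_frag_size = 1400;	/* BATADV_FRAG_MAX_FRAG_SIZE */
	const int max_fragments = 16;	/* BATADV_FRAG_MAX_FRAGMENTS */
	const int frag_hdr = 20;	/* assumed sizeof(struct batadv_frag_packet) */
	const int per_fragment = max_frag_size - frag_hdr;
	const int limit = per_fragment * max_fragments;	/* cf. batadv_frag_size_limit() */
	int payload = 4000;
	int fragments = (payload + per_fragment - 1) / per_fragment;

	/* with these numbers: 22080-byte limit, 3 fragments for a 4000-byte packet */
	printf("merged-size limit: %d bytes\n", limit);
	printf("a %d-byte packet needs %d fragments\n", payload, fragments);
	return 0;
}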
/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
#define _NET_BATMAN_ADV_FRAGMENTATION_H_
void batadv_frag_purge_orig(struct batadv_orig_node *orig,
bool (*check_cb)(struct batadv_frag_table_entry *));
bool batadv_frag_skb_fwd(struct sk_buff *skb,
struct batadv_hard_iface *recv_if,
struct batadv_orig_node *orig_node_src);
bool batadv_frag_skb_buffer(struct sk_buff **skb,
struct batadv_orig_node *orig_node);
bool batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node);
/**
* batadv_frag_check_entry - check if a list of fragments has timed out
* @frags_entry: table entry to check
*
* Returns true if the frags entry has timed out, false otherwise.
*/
static inline bool
batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
{
if (!hlist_empty(&frags_entry->head) &&
batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
return true;
else
return false;
}
#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
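The buffering side, batadv_frag_insert_packet() above, keeps each per-originator chain reverse ordered (highest fragment number first) and drops duplicate fragment numbers. A toy userspace model of just that ordering rule, independent of the kernel's hlist machinery:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS 16

static int chain[MAX_FRAGS];
static int chain_len;

/* Insert a fragment number, keeping the chain sorted highest-first and
 * rejecting duplicates - the same rule batadv_frag_insert_packet() applies
 * to its list of struct batadv_frag_list_entry.
 */
static bool insert_frag(int no)
{
	int i, j;

	for (i = 0; i < chain_len; i++) {
		if (chain[i] == no)
			return false;		/* duplicate fragment: drop it */
		if (chain[i] < no)
			break;			/* found the insertion point */
	}
	for (j = chain_len; j > i; j--)
		chain[j] = chain[j - 1];
	chain[i] = no;
	chain_len++;
	return true;
}

int main(void)
{
	int input[] = { 2, 0, 3, 3, 1 };
	int i;

	for (i = 0; i < 5; i++)
		insert_frag(input[i]);
	for (i = 0; i < chain_len; i++)
		printf("%d ", chain[i]);	/* prints: 3 2 1 0 */
	printf("\n");
	return 0;
}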
@@ -655,24 +655,29 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
+struct vlan_ethhdr *vhdr;
+__be16 proto;
/* check for ethernet header */
if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
return false;
ethhdr = (struct ethhdr *)skb->data;
+proto = ethhdr->h_proto;
*header_len += ETH_HLEN;
/* check for initial vlan header */
-if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+if (proto == htons(ETH_P_8021Q)) {
if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
return false;
-ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+vhdr = (struct vlan_ethhdr *)skb->data;
+proto = vhdr->h_vlan_encapsulated_proto;
*header_len += VLAN_HLEN;
}
/* check for ip header */
-switch (ntohs(ethhdr->h_proto)) {
-case ETH_P_IP:
+switch (proto) {
+case htons(ETH_P_IP):
if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
return false;
iphdr = (struct iphdr *)(skb->data + *header_len);
@@ -683,7 +688,7 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return false;
break;
-case ETH_P_IPV6:
+case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
return false;
ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
@@ -710,12 +715,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
*header_len += sizeof(*udphdr);
/* check for bootp port */
-if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
-(ntohs(udphdr->dest) != 67))
+if ((proto == htons(ETH_P_IP)) &&
+(udphdr->dest != htons(67)))
return false;
-if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
-(ntohs(udphdr->dest) != 547))
+if ((proto == htons(ETH_P_IPV6)) &&
+(udphdr->dest != htons(547)))
return false;
return true;
...
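The byte-order pattern used in the hunk above (and throughout this series) compares big-endian header fields against htons()-converted constants instead of converting the field with ntohs(). Both forms are equivalent; keeping the swap on the constant side lets the compiler fold it at build time. A minimal standalone illustration (plain userspace C, not kernel code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t dest = htons(67);	/* UDP destination port as stored in the header */

	/* Both tests print 1; batman-adv now prefers the first form, where the
	 * byte swap happens on the constant at compile time.
	 */
	printf("%d\n", dest == htons(67));
	printf("%d\n", ntohs(dest) == 67);
	return 0;
}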
@@ -269,9 +269,10 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
const struct batadv_hard_iface *hard_iface;
/* allow big frames if all devices are capable to do so
- * (have MTU > 1500 + BAT_HEADER_LEN)
+ * (have MTU > 1500 + batadv_max_header_len())
*/
int min_mtu = ETH_DATA_LEN;
+int max_header_len = batadv_max_header_len();
if (atomic_read(&bat_priv->fragmentation))
goto out;
@@ -285,8 +286,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
if (hard_iface->soft_iface != soft_iface)
continue;
-min_mtu = min_t(int,
-hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
+min_mtu = min_t(int, hard_iface->net_dev->mtu - max_header_len,
min_mtu);
}
rcu_read_unlock();
@@ -379,7 +379,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
{
struct batadv_priv *bat_priv;
struct net_device *soft_iface, *master;
-__be16 ethertype = __constant_htons(ETH_P_BATMAN);
+__be16 ethertype = htons(ETH_P_BATMAN);
+int max_header_len = batadv_max_header_len();
int ret;
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -444,23 +445,22 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
-atomic_set(&hard_iface->frag_seqno, 1);
batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
hard_iface->net_dev->name);
if (atomic_read(&bat_priv->fragmentation) &&
-hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
batadv_info(hard_iface->soft_iface,
-"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-ETH_DATA_LEN + BATADV_HEADER_LEN);
+ETH_DATA_LEN + max_header_len);
if (!atomic_read(&bat_priv->fragmentation) &&
-hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
batadv_info(hard_iface->soft_iface,
-"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-ETH_DATA_LEN + BATADV_HEADER_LEN);
+ETH_DATA_LEN + max_header_len);
if (batadv_hardif_is_iface_up(hard_iface))
batadv_hardif_activate_interface(hard_iface);
...
@@ -192,25 +192,25 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
-if (icmp_packet->header.packet_type != BATADV_ICMP) {
+if (icmp_packet->icmph.header.packet_type != BATADV_ICMP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
goto free_skb;
}
-if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
len = -EINVAL;
goto free_skb;
}
-icmp_packet->uid = socket_client->index;
+icmp_packet->icmph.uid = socket_client->index;
-if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
-icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
-icmp_packet->header.version = BATADV_COMPAT_VERSION;
+if (icmp_packet->icmph.header.version != BATADV_COMPAT_VERSION) {
+icmp_packet->icmph.msg_type = BATADV_PARAMETER_PROBLEM;
+icmp_packet->icmph.header.version = BATADV_COMPAT_VERSION;
batadv_socket_add_packet(socket_client, icmp_packet,
packet_len);
goto free_skb;
@@ -219,7 +219,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dst_unreach;
-orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.dst);
if (!orig_node)
goto dst_unreach;
@@ -233,7 +233,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
goto dst_unreach;
-memcpy(icmp_packet->orig,
+memcpy(icmp_packet->icmph.orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
if (packet_len == sizeof(struct batadv_icmp_packet_rr))
@@ -244,7 +244,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto out;
dst_unreach:
-icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
+icmp_packet->icmph.msg_type = BATADV_DESTINATION_UNREACHABLE;
batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
free_skb:
kfree_skb(skb);
@@ -318,7 +318,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
/* while waiting for the lock the socket_client could have been
* deleted
*/
-if (!batadv_socket_client_hash[icmp_packet->uid]) {
+if (!batadv_socket_client_hash[icmp_packet->icmph.uid]) {
spin_unlock_bh(&socket_client->lock);
kfree(socket_packet);
return;
@@ -347,7 +347,7 @@ void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
{
struct batadv_socket_client *hash;
-hash = batadv_socket_client_hash[icmp_packet->uid];
+hash = batadv_socket_client_hash[icmp_packet->icmph.uid];
if (hash)
batadv_socket_add_packet(hash, icmp_packet, icmp_len);
}
@@ -36,11 +36,11 @@
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
-#include "unicast.h"
#include "gateway_common.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
+#include "fragmentation.h"
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -255,6 +255,31 @@ batadv_seq_print_text_primary_if_get(struct seq_file *seq)
return primary_if;
}
+/**
+* batadv_max_header_len - calculate maximum encapsulation overhead for a
+* payload packet
+*
+* Return the maximum encapsulation overhead in bytes.
+*/
+int batadv_max_header_len(void)
+{
+int header_len = 0;
+header_len = max_t(int, header_len,
+sizeof(struct batadv_unicast_packet));
+header_len = max_t(int, header_len,
+sizeof(struct batadv_unicast_4addr_packet));
+header_len = max_t(int, header_len,
+sizeof(struct batadv_bcast_packet));
+#ifdef CONFIG_BATMAN_ADV_NC
+header_len = max_t(int, header_len,
+sizeof(struct batadv_coded_packet));
+#endif
+return header_len;
+}
/**
* batadv_skb_set_priority - sets skb priority according to packet content
* @skb: the packet to be sent
@@ -399,10 +424,10 @@ static void batadv_recv_handler_init(void)
/* compile time checks for struct member offsets */
BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
-BUILD_BUG_ON(offsetof(struct batadv_unicast_frag_packet, dest) != 4);
BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
-BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
-BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
+BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
+BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
+BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
/* broadcast packet */
batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -412,12 +437,12 @@ static void batadv_recv_handler_init(void)
batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
/* unicast packet */
batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
-/* fragmented unicast packet */
-batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
/* unicast tvlv packet */
batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
/* batman icmp packet */
batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+/* Fragmented packets */
+batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
int
...
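The new batadv_max_header_len() feeds the MTU checks in hard-interface.c shown earlier. Below is a simplified, standalone sketch (plain userspace C, not kernel code) of the per-slave-interface part of that calculation, with an assumed 46-byte worst-case overhead; the real figure is whatever batadv_max_header_len() returns for the compiled-in packet types.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	const int eth_data_len = 1500;		/* ETH_DATA_LEN */
	const int max_header_len = 46;		/* assumed batadv_max_header_len() result */
	int iface_mtus[] = { 1500, 1532, 9000 };
	int min_mtu = eth_data_len;
	int i;

	/* Mirrors the loop in batadv_hardif_min_mtu(): the soft interface may
	 * carry at most min(slave MTU - overhead) across all slave interfaces,
	 * and never more than a plain Ethernet payload.
	 */
	for (i = 0; i < 3; i++)
		min_mtu = min_int(iface_mtus[i] - max_header_len, min_mtu);

	printf("usable soft-interface MTU: %d\n", min_mtu);	/* 1454 here */
	return 0;
}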
@@ -131,6 +131,15 @@ enum batadv_uev_type {
#define BATADV_GW_THRESHOLD 50
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+/* Time to keep fragments while waiting for rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
#define BATADV_DAT_CANDIDATE_NOT_FOUND 0
#define BATADV_DAT_CANDIDATE_ORIG 1
@@ -182,6 +191,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+int batadv_max_header_len(void);
void batadv_skb_set_priority(struct sk_buff *skb, int offset);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
...
@@ -25,10 +25,10 @@
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
-#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
+#include "fragmentation.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -146,7 +146,8 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
-batadv_frag_list_free(&orig_node->frag_list);
+batadv_frag_purge_orig(orig_node, NULL);
batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
"originator timed out");
@@ -217,7 +218,7 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
const uint8_t *addr)
{
struct batadv_orig_node *orig_node;
-int size;
+int size, i;
int hash_added;
unsigned long reset_time;
@@ -269,8 +270,11 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
size = bat_priv->num_ifaces * sizeof(uint8_t);
orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-INIT_LIST_HEAD(&orig_node->frag_list);
-orig_node->last_frag_packet = 0;
+for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+INIT_HLIST_HEAD(&orig_node->fragments[i].head);
+spin_lock_init(&orig_node->fragments[i].lock);
+orig_node->fragments[i].size = 0;
+}
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
@@ -394,9 +398,8 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
continue;
}
-if (batadv_has_timed_out(orig_node->last_frag_packet,
-BATADV_FRAG_TIMEOUT))
-batadv_frag_list_free(&orig_node->frag_list);
+batadv_frag_purge_orig(orig_node,
+batadv_frag_check_entry);
}
spin_unlock_bh(list_lock);
}
...
@@ -91,12 +91,6 @@ enum batadv_icmp_packettype {
BATADV_PARAMETER_PROBLEM = 12,
};
-/* fragmentation defines */
-enum batadv_unicast_frag_flags {
-BATADV_UNI_FRAG_HEAD = BIT(0),
-BATADV_UNI_FRAG_LARGETAIL = BIT(1),
-};
/* tt data subtypes */
#define BATADV_TT_DATA_TYPE_MASK 0x0F
@@ -192,29 +186,47 @@ struct batadv_ogm_packet {
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
-struct batadv_icmp_packet {
+/**
+* batadv_icmp_header - common ICMP header
+* @header: common batman header
+* @msg_type: ICMP packet type
+* @dst: address of the destination node
+* @orig: address of the source node
+* @uid: local ICMP socket identifier
+*/
+struct batadv_icmp_header {
struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
-__be16 seqno;
uint8_t uid;
+};
+/**
+* batadv_icmp_packet - ICMP packet
+* @icmph: common ICMP header
+* @reserved: not used - useful for alignment
+* @seqno: ICMP sequence number
+*/
+struct batadv_icmp_packet {
+struct batadv_icmp_header icmph;
uint8_t reserved;
+__be16 seqno;
};
#define BATADV_RR_LEN 16
-/* icmp_packet_rr must start with all fields from imcp_packet
-* as this is assumed by code that handles ICMP packets
+/**
+* batadv_icmp_packet_rr - ICMP RouteRecord packet
+* @icmph: common ICMP header
+* @rr_cur: number of entries in the rr array
+* @seqno: ICMP sequence number
+* @rr: route record array
*/
struct batadv_icmp_packet_rr {
-struct batadv_header header;
-uint8_t msg_type; /* see ICMP message types above */
-uint8_t dst[ETH_ALEN];
-uint8_t orig[ETH_ALEN];
-__be16 seqno;
-uint8_t uid;
+struct batadv_icmp_header icmph;
uint8_t rr_cur;
+__be16 seqno;
uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
};
@@ -255,15 +267,32 @@ struct batadv_unicast_4addr_packet {
*/
};
-struct batadv_unicast_frag_packet {
-struct batadv_header header;
-uint8_t ttvn; /* destination translation table version number */
-uint8_t dest[ETH_ALEN];
-uint8_t flags;
-uint8_t align;
-uint8_t orig[ETH_ALEN];
-__be16 seqno;
-} __packed;
+/**
+* struct batadv_frag_packet - fragmented packet
+* @header: common batman packet header with type, compatversion, and ttl
+* @dest: final destination used when routing fragments
+* @orig: originator of the fragment used when merging the packet
+* @no: fragment number within this sequence
+* @reserved: reserved byte for alignment
+* @seqno: sequence identification
+* @total_size: size of the merged packet
+*/
+struct batadv_frag_packet {
+struct batadv_header header;
+#if defined(__BIG_ENDIAN_BITFIELD)
+uint8_t no:4;
+uint8_t reserved:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+uint8_t reserved:4;
+uint8_t no:4;
+#else
+#error "unknown bitfield endianess"
+#endif
+uint8_t dest[ETH_ALEN];
+uint8_t orig[ETH_ALEN];
+__be16 seqno;
+__be16 total_size;
+};
struct batadv_bcast_packet {
struct batadv_header header;
...
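A quick way to sanity-check the new on-wire layout of struct batadv_frag_packet is to rebuild it in userspace. The sketch below assumes a packed 3-byte batadv_header (packet_type, version, ttl), which is consistent with the BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4) check added in main.c; under that assumption the fragment header is 20 bytes on the wire. This is an illustrative standalone program, not the kernel's definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_batadv_header {		/* assumed layout of struct batadv_header */
	uint8_t packet_type;
	uint8_t version;
	uint8_t ttl;
} __attribute__((packed));

struct sketch_frag_packet {		/* mirrors struct batadv_frag_packet above */
	struct sketch_batadv_header header;
	uint8_t no_and_reserved;	/* 4-bit fragment number + 4 reserved bits */
	uint8_t dest[6];
	uint8_t orig[6];
	uint16_t seqno;			/* big endian on the wire */
	uint16_t total_size;		/* big endian on the wire */
} __attribute__((packed));

int main(void)
{
	/* expected with these assumptions: dest at offset 4, 20 bytes total */
	printf("offsetof(dest) = %zu\n", offsetof(struct sketch_frag_packet, dest));
	printf("sizeof = %zu bytes\n", sizeof(struct sketch_frag_packet));
	return 0;
}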
@@ -25,10 +25,10 @@
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "unicast.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "network-coding.h"
+#include "fragmentation.h"
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
@@ -258,7 +258,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add data to device queue */
-if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
batadv_socket_receive_packet(icmp_packet, icmp_len);
goto out;
}
@@ -269,7 +269,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
/* answer echo request (ping) */
/* get routing information */
-orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
if (!orig_node)
goto out;
@@ -279,10 +279,11 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
-memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-icmp_packet->msg_type = BATADV_ECHO_REPLY;
-icmp_packet->header.ttl = BATADV_TTL;
+memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ETH_ALEN);
+icmp_packet->icmph.msg_type = BATADV_ECHO_REPLY;
+icmp_packet->icmph.header.ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -306,9 +307,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
-if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-icmp_packet->orig, icmp_packet->dst);
+icmp_packet->icmph.orig, icmp_packet->icmph.dst);
goto out;
}
@@ -317,7 +318,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
goto out;
/* get routing information */
-orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
if (!orig_node)
goto out;
@@ -327,10 +328,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
-memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
-icmp_packet->header.ttl = BATADV_TTL;
+memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ETH_ALEN);
+icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
+icmp_packet->icmph.header.ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -379,7 +381,9 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add record route information if not full */
-if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
+if ((icmp_packet->icmph.msg_type == BATADV_ECHO_REPLY ||
+icmp_packet->icmph.msg_type == BATADV_ECHO_REQUEST) &&
+(hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
(icmp_packet->rr_cur < BATADV_RR_LEN)) {
memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
ethhdr->h_dest, ETH_ALEN);
@@ -387,15 +391,15 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
}
/* packet for me */
-if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
+if (batadv_is_my_mac(bat_priv, icmp_packet->icmph.dst))
return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
-if (icmp_packet->header.ttl < 2)
+if (icmp_packet->icmph.header.ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
-orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.dst);
if (!orig_node)
goto out;
@@ -406,7 +410,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* decrement ttl */
-icmp_packet->header.ttl--;
+icmp_packet->icmph.header.ttl--;
/* route it */
if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -651,11 +655,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node = NULL;
-struct batadv_neigh_node *neigh_node = NULL;
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = eth_hdr(skb);
int res, hdr_len, ret = NET_RX_DROP;
-struct sk_buff *new_skb;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -672,46 +674,12 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
if (!orig_node)
goto out;
-/* find_router() increases neigh_nodes refcount if found. */
-neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
-if (!neigh_node)
-goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
-unicast_packet = (struct batadv_unicast_packet *)skb->data;
-if (unicast_packet->header.packet_type == BATADV_UNICAST &&
-atomic_read(&bat_priv->fragmentation) &&
-skb->len > neigh_node->if_incoming->net_dev->mtu) {
-ret = batadv_frag_send_skb(skb, bat_priv,
-neigh_node->if_incoming,
-neigh_node->addr);
-goto out;
-}
-if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
-batadv_frag_can_reassemble(skb,
-neigh_node->if_incoming->net_dev->mtu)) {
-ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-if (ret == NET_RX_DROP)
-goto out;
-/* packet was buffered for late merge */
-if (!new_skb) {
-ret = NET_RX_SUCCESS;
-goto out;
-}
-skb = new_skb;
-unicast_packet = (struct batadv_unicast_packet *)skb->data;
-}
/* decrement ttl */
+unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->header.ttl--;
switch (unicast_packet->header.packet_type) {
@@ -746,8 +714,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
}
out:
-if (neigh_node)
-batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
batadv_orig_node_free_ref(orig_node);
return ret;
@@ -1001,51 +967,6 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
return batadv_route_unicast_packet(skb, recv_if);
}
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-struct batadv_hard_iface *recv_if)
-{
-struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-struct batadv_unicast_frag_packet *unicast_packet;
-int hdr_size = sizeof(*unicast_packet);
-struct sk_buff *new_skb = NULL;
-int ret;
-if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
-return NET_RX_DROP;
-if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
-return NET_RX_DROP;
-unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-/* packet for me */
-if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
-ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-if (ret == NET_RX_DROP)
-return NET_RX_DROP;
-/* packet was buffered for late merge */
-if (!new_skb)
-return NET_RX_SUCCESS;
-if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
-hdr_size))
-goto rx_success;
-if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
-hdr_size))
-goto rx_success;
-batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-sizeof(struct batadv_unicast_packet), NULL);
-rx_success:
-return NET_RX_SUCCESS;
-}
-return batadv_route_unicast_packet(skb, recv_if);
-}
/**
* batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
* @skb: unicast tvlv packet to process
@@ -1095,6 +1016,64 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
return ret;
}
+/**
+* batadv_recv_frag_packet - process received fragment
+* @skb: the received fragment
+* @recv_if: interface that the skb is received on
+*
+* This function does one of the three following things: 1) Forward fragment, if
+* the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
+* lack further fragments; 3) Merge fragments, if we have all needed parts.
+*
+* Return NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+*/
+int batadv_recv_frag_packet(struct sk_buff *skb,
+struct batadv_hard_iface *recv_if)
+{
+struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+struct batadv_orig_node *orig_node_src = NULL;
+struct batadv_frag_packet *frag_packet;
+int ret = NET_RX_DROP;
+if (batadv_check_unicast_packet(bat_priv, skb,
+sizeof(*frag_packet)) < 0)
+goto out;
+frag_packet = (struct batadv_frag_packet *)skb->data;
+orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+if (!orig_node_src)
+goto out;
+/* Route the fragment if it is not for us and too big to be merged. */
+if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+ret = NET_RX_SUCCESS;
+goto out;
+}
+batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+/* Add fragment to buffer and merge if possible. */
+if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+goto out;
+/* Deliver merged packet to the appropriate handler, if it was
+* merged
+*/
+if (skb)
+batadv_batman_skb_recv(skb, recv_if->net_dev,
+&recv_if->batman_adv_ptype, NULL);
+ret = NET_RX_SUCCESS;
+out:
+if (orig_node_src)
+batadv_orig_node_free_ref(orig_node_src);
+return ret;
+}
int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
...
...@@ -30,8 +30,8 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, ...@@ -30,8 +30,8 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if); struct batadv_hard_iface *recv_if);
int batadv_recv_unicast_packet(struct sk_buff *skb, int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if); struct batadv_hard_iface *recv_if);
int batadv_recv_ucast_frag_packet(struct sk_buff *skb, int batadv_recv_frag_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if); struct batadv_hard_iface *iface);
int batadv_recv_bcast_packet(struct sk_buff *skb, int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if); struct batadv_hard_iface *recv_if);
int batadv_recv_tt_query(struct sk_buff *skb, int batadv_recv_tt_query(struct sk_buff *skb,
......
...@@ -25,10 +25,10 @@ ...@@ -25,10 +25,10 @@
#include "soft-interface.h" #include "soft-interface.h"
#include "hard-interface.h" #include "hard-interface.h"
#include "gateway_common.h" #include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h" #include "originator.h"
#include "network-coding.h" #include "network-coding.h"
#include "fragmentation.h"
#include <linux/if_ether.h>
static void batadv_send_outstanding_bcast_packet(struct work_struct *work); static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
...@@ -63,10 +63,10 @@ int batadv_send_skb_packet(struct sk_buff *skb, ...@@ -63,10 +63,10 @@ int batadv_send_skb_packet(struct sk_buff *skb,
ethhdr = eth_hdr(skb); ethhdr = eth_hdr(skb);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); ethhdr->h_proto = htons(ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN); skb_set_network_header(skb, ETH_HLEN);
skb->protocol = __constant_htons(ETH_P_BATMAN); skb->protocol = htons(ETH_P_BATMAN);
skb->dev = hard_iface->net_dev; skb->dev = hard_iface->net_dev;
...@@ -108,7 +108,19 @@ int batadv_send_skb_to_orig(struct sk_buff *skb, ...@@ -108,7 +108,19 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
/* batadv_find_router() increases neigh_nodes refcount if found. */ /* batadv_find_router() increases neigh_nodes refcount if found. */
neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node) if (!neigh_node)
return ret; goto out;
/* Check if the skb is too large to send in one piece and fragment
* it if needed.
*/
if (atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
/* Fragment and send packet. */
if (batadv_frag_send_packet(skb, orig_node, neigh_node))
ret = NET_XMIT_SUCCESS;
goto out;
}
/* try to network code the packet, if it is received on an interface /* try to network code the packet, if it is received on an interface
* (i.e. being forwarded). If the packet originates from this node or if * (i.e. being forwarded). If the packet originates from this node or if
...@@ -122,8 +134,170 @@ int batadv_send_skb_to_orig(struct sk_buff *skb, ...@@ -122,8 +134,170 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
ret = NET_XMIT_SUCCESS; ret = NET_XMIT_SUCCESS;
} }
batadv_neigh_node_free_ref(neigh_node); out:
if (neigh_node)
batadv_neigh_node_free_ref(neigh_node);
return ret;
}
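The hunk above adds the fragmentation decision to batadv_send_skb_to_orig(): the skb is only split when the fragmentation setting is enabled and the frame does not fit the MTU of the interface towards the chosen next hop. A minimal standalone sketch of that check, with the batman-adv types replaced by plain integers (needs_fragmentation and the sizes used in main() are illustrative, not part of the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the new check in batadv_send_skb_to_orig():
 * fragment only when the feature is switched on and the frame exceeds
 * the MTU of the interface towards the chosen next hop.
 */
static bool needs_fragmentation(bool fragmentation_enabled,
                                unsigned int skb_len, unsigned int mtu)
{
        return fragmentation_enabled && skb_len > mtu;
}

int main(void)
{
        /* e.g. a 1532 byte batman-encapsulated frame, 1500 byte MTU */
        printf("%d\n", needs_fragmentation(true, 1532, 1500)); /* 1 */
        printf("%d\n", needs_fragmentation(true, 1400, 1500)); /* 0 */
        return 0;
}

How many fragments batadv_frag_send_packet() then produces depends on the size of the new fragmentation header, which is defined elsewhere in this series and not visible in this hunk.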
/**
* batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
* common fields for unicast packets
* @skb: the skb carrying the unicast header to initialize
* @hdr_size: amount of bytes to push at the beginning of the skb
* @orig_node: the destination node
*
* Returns false if the buffer extension was not possible or true otherwise.
*/
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node)
{
struct batadv_unicast_packet *unicast_packet;
uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
if (batadv_skb_head_push(skb, hdr_size) < 0)
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->header.version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
unicast_packet->header.packet_type = BATADV_UNICAST;
/* set unicast ttl */
unicast_packet->header.ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
unicast_packet->ttvn = ttvn;
return true;
}
/**
* batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
* Returns false if the payload could not be encapsulated or true otherwise.
*/
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
{
size_t uni_size = sizeof(struct batadv_unicast_packet);
return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
/**
* batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
* unicast 4addr header
* @bat_priv: the bat priv with all the soft interface information
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
* @packet_subtype: the unicast 4addr packet subtype to use
*
* Returns false if the payload could not be encapsulated or true otherwise.
*/
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct batadv_orig_node *orig,
int packet_subtype)
{
struct batadv_hard_iface *primary_if;
struct batadv_unicast_4addr_packet *uc_4addr_packet;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* Pull the header space and fill the unicast_packet substructure.
* We can do that because the first member of the uc_4addr_packet
* is of type struct unicast_packet
*/
if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
orig))
goto out;
uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
uc_4addr_packet->subtype = packet_subtype;
uc_4addr_packet->reserved = 0;
ret = true;
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
return ret;
}
/**
* batadv_send_skb_generic_unicast - send an skb as unicast
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
* @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
* 4addr packets)
*
* Returns 1 in case of error or 0 otherwise.
*/
int batadv_send_skb_generic_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct batadv_unicast_packet *unicast_packet;
struct batadv_orig_node *orig_node;
int ret = NET_RX_DROP;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest))
orig_node = batadv_gw_get_selected_orig(bat_priv);
else
/* check for tt host - increases orig_node refcount.
* returns NULL in case of AP isolation
*/
orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
if (!orig_node)
goto out;
switch (packet_type) {
case BATADV_UNICAST:
batadv_send_skb_prepare_unicast(skb, orig_node);
break;
case BATADV_UNICAST_4ADDR:
batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
packet_subtype);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
* should never be invoked with any other packet type
*/
goto out;
}
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
* for this client. The destination will receive this packet and will
* try to reroute it because the ttvn contained in the header is less
* than the current one
*/
if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = 0;
out:
if (orig_node)
batadv_orig_node_free_ref(orig_node);
if (ret == NET_RX_DROP)
kfree_skb(skb);
return ret; return ret;
} }
......
...@@ -34,5 +34,45 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work); ...@@ -34,5 +34,45 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
void void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface); const struct batadv_hard_iface *hard_iface);
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct batadv_orig_node *orig_node,
int packet_subtype);
int batadv_send_skb_generic_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype);
/**
* batadv_send_skb_unicast - send the skb encapsulated in a unicast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
*
* Returns 1 in case of error or 0 otherwise.
*/
static inline int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
return batadv_send_skb_generic_unicast(bat_priv, skb, BATADV_UNICAST,
0);
}
/**
* batadv_send_skb_unicast_4addr - send the skb encapsulated in a unicast 4addr
* packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
* @packet_subtype: the unicast 4addr packet subtype to use
*
* Returns 1 in case of error or 0 otherwise.
*/
static inline int batadv_send_skb_unicast_4addr(struct batadv_priv *bat_priv,
struct sk_buff *skb,
int packet_subtype)
{
return batadv_send_skb_generic_unicast(bat_priv, skb,
BATADV_UNICAST_4ADDR,
packet_subtype);
}
#endif /* _NET_BATMAN_ADV_SEND_H_ */ #endif /* _NET_BATMAN_ADV_SEND_H_ */
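For illustration, a hedged sketch of how a caller might pick between the two inline wrappers above. The helper name tx_one_frame and the use of BATADV_P_DATA as the 4addr subtype are assumptions made for this example; both wrappers return 0 on success and 1 on error, and batadv_send_skb_generic_unicast() already frees the skb on the error path (see the kfree_skb() at its end), so the caller must not free it again:

/* Hypothetical wrapper around the two inline helpers above; tx_one_frame
 * and the choice of BATADV_P_DATA as subtype are illustrative only.
 */
static int tx_one_frame(struct batadv_priv *bat_priv, struct sk_buff *skb,
                        bool four_addr)
{
        if (four_addr)
                return batadv_send_skb_unicast_4addr(bat_priv, skb,
                                                     BATADV_P_DATA);

        return batadv_send_skb_unicast(bat_priv, skb);
}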
...@@ -34,8 +34,6 @@ ...@@ -34,8 +34,6 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include "unicast.h"
#include "bridge_loop_avoidance.h" #include "bridge_loop_avoidance.h"
#include "network-coding.h" #include "network-coding.h"
...@@ -139,6 +137,18 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) ...@@ -139,6 +137,18 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
return 0; return 0;
} }
/**
* batadv_interface_set_rx_mode - set the rx mode of a device
* @dev: registered network device to modify
*
* We do not actually need to set any rx filters for the virtual batman
* soft interface. However, a dummy handler allows a user to set static
* multicast listeners, for instance.
*/
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
}
static int batadv_interface_tx(struct sk_buff *skb, static int batadv_interface_tx(struct sk_buff *skb,
struct net_device *soft_iface) struct net_device *soft_iface)
{ {
...@@ -147,7 +157,7 @@ static int batadv_interface_tx(struct sk_buff *skb, ...@@ -147,7 +157,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
struct batadv_hard_iface *primary_if = NULL; struct batadv_hard_iface *primary_if = NULL;
struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr; struct vlan_ethhdr *vhdr;
__be16 ethertype = __constant_htons(ETH_P_BATMAN); __be16 ethertype = htons(ETH_P_BATMAN);
static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
0x00, 0x00}; 0x00, 0x00};
static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
...@@ -286,7 +296,7 @@ static int batadv_interface_tx(struct sk_buff *skb, ...@@ -286,7 +296,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb); batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
ret = batadv_unicast_send_skb(bat_priv, skb); ret = batadv_send_skb_unicast(bat_priv, skb);
if (ret != 0) if (ret != 0)
goto dropped_freed; goto dropped_freed;
} }
...@@ -314,7 +324,7 @@ void batadv_interface_rx(struct net_device *soft_iface, ...@@ -314,7 +324,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct vlan_ethhdr *vhdr; struct vlan_ethhdr *vhdr;
struct batadv_header *batadv_header = (struct batadv_header *)skb->data; struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
unsigned short vid __maybe_unused = BATADV_NO_FLAGS; unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
__be16 ethertype = __constant_htons(ETH_P_BATMAN); __be16 ethertype = htons(ETH_P_BATMAN);
bool is_bcast; bool is_bcast;
is_bcast = (batadv_header->packet_type == BATADV_BCAST); is_bcast = (batadv_header->packet_type == BATADV_BCAST);
...@@ -444,6 +454,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work) ...@@ -444,6 +454,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
static int batadv_softif_init_late(struct net_device *dev) static int batadv_softif_init_late(struct net_device *dev)
{ {
struct batadv_priv *bat_priv; struct batadv_priv *bat_priv;
uint32_t random_seqno;
int ret; int ret;
size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM; size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
...@@ -493,6 +504,10 @@ static int batadv_softif_init_late(struct net_device *dev) ...@@ -493,6 +504,10 @@ static int batadv_softif_init_late(struct net_device *dev)
bat_priv->tt.last_changeset = NULL; bat_priv->tt.last_changeset = NULL;
bat_priv->tt.last_changeset_len = 0; bat_priv->tt.last_changeset_len = 0;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&bat_priv->frag_seqno, random_seqno);
bat_priv->primary_if = NULL; bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0; bat_priv->num_ifaces = 0;
...@@ -580,6 +595,7 @@ static const struct net_device_ops batadv_netdev_ops = { ...@@ -580,6 +595,7 @@ static const struct net_device_ops batadv_netdev_ops = {
.ndo_get_stats = batadv_interface_stats, .ndo_get_stats = batadv_interface_stats,
.ndo_set_mac_address = batadv_interface_set_mac_addr, .ndo_set_mac_address = batadv_interface_set_mac_addr,
.ndo_change_mtu = batadv_interface_change_mtu, .ndo_change_mtu = batadv_interface_change_mtu,
.ndo_set_rx_mode = batadv_interface_set_rx_mode,
.ndo_start_xmit = batadv_interface_tx, .ndo_start_xmit = batadv_interface_tx,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_add_slave = batadv_softif_slave_add, .ndo_add_slave = batadv_softif_slave_add,
...@@ -623,7 +639,7 @@ static void batadv_softif_init_early(struct net_device *dev) ...@@ -623,7 +639,7 @@ static void batadv_softif_init_early(struct net_device *dev)
*/ */
dev->mtu = ETH_DATA_LEN; dev->mtu = ETH_DATA_LEN;
/* reserve more space in the skbuff for our header */ /* reserve more space in the skbuff for our header */
dev->hard_header_len = BATADV_HEADER_LEN; dev->hard_header_len = batadv_max_header_len();
/* generate random address */ /* generate random address */
eth_hw_addr_random(dev); eth_hw_addr_random(dev);
...@@ -760,6 +776,12 @@ static const struct { ...@@ -760,6 +776,12 @@ static const struct {
{ "mgmt_tx_bytes" }, { "mgmt_tx_bytes" },
{ "mgmt_rx" }, { "mgmt_rx" },
{ "mgmt_rx_bytes" }, { "mgmt_rx_bytes" },
{ "frag_tx" },
{ "frag_tx_bytes" },
{ "frag_rx" },
{ "frag_rx_bytes" },
{ "frag_fwd" },
{ "frag_fwd_bytes" },
{ "tt_request_tx" }, { "tt_request_tx" },
{ "tt_request_rx" }, { "tt_request_rx" },
{ "tt_response_tx" }, { "tt_response_tx" },
......
...@@ -117,25 +117,17 @@ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry) ...@@ -117,25 +117,17 @@ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
kfree_rcu(tt_local_entry, common.rcu); kfree_rcu(tt_local_entry, common.rcu);
} }
static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) /**
{ * batadv_tt_global_entry_free_ref - decrement the refcounter for a
struct batadv_tt_common_entry *tt_common_entry; * tt_global_entry and possibly free it
struct batadv_tt_global_entry *tt_global_entry; * @tt_global_entry: the object to free
*/
tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
tt_global_entry = container_of(tt_common_entry,
struct batadv_tt_global_entry, common);
kfree(tt_global_entry);
}
static void static void
batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry) batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{ {
if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
batadv_tt_global_del_orig_list(tt_global_entry); batadv_tt_global_del_orig_list(tt_global_entry);
call_rcu(&tt_global_entry->common.rcu, kfree_rcu(tt_global_entry, common.rcu);
batadv_tt_global_entry_free_rcu);
} }
} }
...@@ -240,6 +232,17 @@ static int batadv_tt_len(int changes_num) ...@@ -240,6 +232,17 @@ static int batadv_tt_len(int changes_num)
return changes_num * sizeof(struct batadv_tvlv_tt_change); return changes_num * sizeof(struct batadv_tvlv_tt_change);
} }
/**
* batadv_tt_entries - compute the number of entries fitting in tt_len bytes
* @tt_len: available space
*
* Returns the number of entries.
*/
static uint16_t batadv_tt_entries(uint16_t tt_len)
{
return tt_len / batadv_tt_len(1);
}
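batadv_tt_entries() is simply the inverse of batadv_tt_len() shown above (changes_num * sizeof(struct batadv_tvlv_tt_change)), so callers can convert between a byte budget and an entry count. A standalone userspace sketch of the pair; the 8-byte entry size is an assumed placeholder, not the real sizeof(struct batadv_tvlv_tt_change):

#include <stdint.h>
#include <stdio.h>

/* Assumed placeholder for sizeof(struct batadv_tvlv_tt_change); the real
 * value is not spelled out in this hunk.
 */
#define TT_CHANGE_SIZE 8

static int tt_len(int changes_num)
{
        return changes_num * TT_CHANGE_SIZE;
}

static uint16_t tt_entries(uint16_t len)
{
        return len / tt_len(1);
}

int main(void)
{
        /* a 100 byte TVLV payload holds 12 full entries, 4 bytes unused */
        printf("%d\n", (int)tt_entries(100));    /* 12 */
        printf("%d\n", tt_len(tt_entries(100))); /* 96, never above 100 */
        return 0;
}

This round-trip is what the hunks below rely on when they replace the open-coded divisions (tt_diff_len / batadv_tt_len(1), tvlv_value_len / batadv_tt_len(1)) with the new helper.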
static int batadv_tt_local_init(struct batadv_priv *bat_priv) static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{ {
if (bat_priv->tt.local_hash) if (bat_priv->tt.local_hash)
...@@ -414,7 +417,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) ...@@ -414,7 +417,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
if (tt_diff_len == 0) if (tt_diff_len == 0)
goto container_register; goto container_register;
tt_diff_entries_num = tt_diff_len / batadv_tt_len(1); tt_diff_entries_num = batadv_tt_entries(tt_diff_len);
spin_lock_bh(&bat_priv->tt.changes_list_lock); spin_lock_bh(&bat_priv->tt.changes_list_lock);
atomic_set(&bat_priv->tt.local_changes, 0); atomic_set(&bat_priv->tt.local_changes, 0);
...@@ -805,15 +808,17 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, ...@@ -805,15 +808,17 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
* If a TT local entry exists for this non-mesh client remove it. * If a TT local entry exists for this non-mesh client remove it.
* *
* The caller must hold orig_node refcount. * The caller must hold orig_node refcount.
*
* Return true if the new entry has been added, false otherwise
*/ */
int batadv_tt_global_add(struct batadv_priv *bat_priv, static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_node,
const unsigned char *tt_addr, uint16_t flags, const unsigned char *tt_addr, uint16_t flags,
uint8_t ttvn) uint8_t ttvn)
{ {
struct batadv_tt_global_entry *tt_global_entry; struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *tt_local_entry; struct batadv_tt_local_entry *tt_local_entry;
int ret = 0; bool ret = false;
int hash_added; int hash_added;
struct batadv_tt_common_entry *common; struct batadv_tt_common_entry *common;
uint16_t local_flags; uint16_t local_flags;
...@@ -914,7 +919,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, ...@@ -914,7 +919,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_TT, bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n", "Creating new global tt entry: %pM (via %pM)\n",
common->addr, orig_node->orig); common->addr, orig_node->orig);
ret = 1; ret = true;
out_remove: out_remove:
...@@ -1491,11 +1496,9 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv) ...@@ -1491,11 +1496,9 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_node,
const unsigned char *tt_buff, const void *tt_buff,
uint16_t tt_num_changes) uint16_t tt_buff_len)
{ {
uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
/* Replace the old buffer only if I received something in the /* Replace the old buffer only if I received something in the
* last OGM (the OGM could carry no changes) * last OGM (the OGM could carry no changes)
*/ */
...@@ -1622,7 +1625,7 @@ batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, ...@@ -1622,7 +1625,7 @@ batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
tt_len -= tt_len % sizeof(struct batadv_tvlv_tt_change); tt_len -= tt_len % sizeof(struct batadv_tvlv_tt_change);
} }
tt_tot = tt_len / sizeof(struct batadv_tvlv_tt_change); tt_tot = batadv_tt_entries(tt_len);
tvlv_tt_data = kzalloc(sizeof(*tvlv_tt_data) + tt_len, tvlv_tt_data = kzalloc(sizeof(*tvlv_tt_data) + tt_len,
GFP_ATOMIC); GFP_ATOMIC);
...@@ -2032,8 +2035,8 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv, ...@@ -2032,8 +2035,8 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
_batadv_tt_update_changes(bat_priv, orig_node, tt_change, _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
tt_num_changes, ttvn); tt_num_changes, ttvn);
batadv_tt_save_orig_buffer(bat_priv, orig_node, batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
(unsigned char *)tt_change, tt_num_changes); batadv_tt_len(tt_num_changes));
atomic_set(&orig_node->last_ttvn, ttvn); atomic_set(&orig_node->last_ttvn, ttvn);
} }
...@@ -2573,7 +2576,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, ...@@ -2573,7 +2576,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
tt_data = (struct batadv_tvlv_tt_data *)tvlv_value; tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
tvlv_value_len -= sizeof(*tt_data); tvlv_value_len -= sizeof(*tt_data);
num_entries = tvlv_value_len / batadv_tt_len(1); num_entries = batadv_tt_entries(tvlv_value_len);
batadv_tt_update_orig(bat_priv, orig, batadv_tt_update_orig(bat_priv, orig,
(unsigned char *)(tt_data + 1), (unsigned char *)(tt_data + 1),
...@@ -2608,7 +2611,7 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, ...@@ -2608,7 +2611,7 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
tt_data = (struct batadv_tvlv_tt_data *)tvlv_value; tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
tvlv_value_len -= sizeof(*tt_data); tvlv_value_len -= sizeof(*tt_data);
num_entries = tvlv_value_len / batadv_tt_len(1); num_entries = batadv_tt_entries(tvlv_value_len);
switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) { switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
case BATADV_TT_REQUEST: case BATADV_TT_REQUEST:
......
...@@ -27,13 +27,6 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, ...@@ -27,13 +27,6 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
const uint8_t *addr, const char *message, const uint8_t *addr, const char *message,
bool roaming); bool roaming);
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *tt_buff, int tt_buff_len);
int batadv_tt_global_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr, uint16_t flags,
uint8_t ttvn);
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset); int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_node,
......
...@@ -24,13 +24,6 @@ ...@@ -24,13 +24,6 @@
#include "bitarray.h" #include "bitarray.h"
#include <linux/kernel.h> #include <linux/kernel.h>
/**
* Maximum overhead for the encapsulation for a payload packet
*/
#define BATADV_HEADER_LEN \
(ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
sizeof(struct batadv_bcast_packet)))
#ifdef CONFIG_BATMAN_ADV_DAT #ifdef CONFIG_BATMAN_ADV_DAT
/* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed, /* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
...@@ -60,7 +53,6 @@ struct batadv_hard_iface_bat_iv { ...@@ -60,7 +53,6 @@ struct batadv_hard_iface_bat_iv {
* @if_num: identificator of the interface * @if_num: identificator of the interface
* @if_status: status of the interface for batman-adv * @if_status: status of the interface for batman-adv
* @net_dev: pointer to the net_device * @net_dev: pointer to the net_device
* @frag_seqno: last fragment sequence number sent by this interface
* @num_bcasts: number of payload re-broadcasts on this interface (ARQ) * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
* @hardif_obj: kobject of the per interface sysfs "mesh" directory * @hardif_obj: kobject of the per interface sysfs "mesh" directory
* @refcount: number of contexts the object is used * @refcount: number of contexts the object is used
...@@ -76,7 +68,6 @@ struct batadv_hard_iface { ...@@ -76,7 +68,6 @@ struct batadv_hard_iface {
int16_t if_num; int16_t if_num;
char if_status; char if_status;
struct net_device *net_dev; struct net_device *net_dev;
atomic_t frag_seqno;
uint8_t num_bcasts; uint8_t num_bcasts;
struct kobject *hardif_obj; struct kobject *hardif_obj;
atomic_t refcount; atomic_t refcount;
...@@ -87,6 +78,34 @@ struct batadv_hard_iface { ...@@ -87,6 +78,34 @@ struct batadv_hard_iface {
struct work_struct cleanup_work; struct work_struct cleanup_work;
}; };
/**
* struct batadv_frag_table_entry - head in the fragment buffer table
* @head: head of list with fragments
* @lock: lock to protect the list of fragments
* @timestamp: time (jiffie) of last received fragment
* @seqno: sequence number of the fragments in the list
* @size: accumulated size of packets in list
*/
struct batadv_frag_table_entry {
struct hlist_head head;
spinlock_t lock; /* protects head */
unsigned long timestamp;
uint16_t seqno;
uint16_t size;
};
/**
* struct batadv_frag_list_entry - entry in a list of fragments
* @list: list node information
* @skb: fragment
* @no: fragment number in the set
*/
struct batadv_frag_list_entry {
struct hlist_node list;
struct sk_buff *skb;
uint8_t no;
};
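To see how the two new structures are meant to interact, here is a hedged userspace mock-up: one batadv_frag_table_entry heads a chain of fragments that share a seqno, and keeping the chain sorted by fragment number lets a merge walk it front to back. The list handling below is illustrative only; the real code in fragmentation.c uses the hlist and spinlock members declared above:

#include <stdint.h>
#include <stdio.h>

/* Userspace mock-up of the two structures above.  Locking, timestamps
 * and skbs are left out on purpose.
 */
struct frag_list_entry {
        struct frag_list_entry *next;
        uint8_t no;                     /* fragment number in the set */
};

struct frag_table_entry {
        struct frag_list_entry *head;
        uint16_t seqno;
        uint16_t size;                  /* accumulated payload size */
};

/* Insert a fragment so the chain stays sorted by ->no. */
static void frag_chain_insert(struct frag_table_entry *chain,
                              struct frag_list_entry *frag)
{
        struct frag_list_entry **pos = &chain->head;

        while (*pos && (*pos)->no < frag->no)
                pos = &(*pos)->next;

        frag->next = *pos;
        *pos = frag;
}

int main(void)
{
        struct frag_table_entry chain = { .seqno = 42 };
        struct frag_list_entry f0 = { .no = 0 }, f1 = { .no = 1 };
        struct frag_list_entry f2 = { .no = 2 };
        struct frag_list_entry *cur;

        /* fragments arrive out of order ... */
        frag_chain_insert(&chain, &f2);
        frag_chain_insert(&chain, &f0);
        frag_chain_insert(&chain, &f1);

        /* ... but the chain comes out as 0 1 2 */
        for (cur = chain.head; cur; cur = cur->next)
                printf("%d ", cur->no);
        printf("\n");
        return 0;
}

The chains themselves live in the fragments[BATADV_FRAG_BUFFER_COUNT] array added to batadv_orig_node further down, i.e. buffering is done per originator.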
/** /**
* struct batadv_orig_node - structure for orig_list maintaining nodes of mesh * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
* @orig: originator ethernet address * @orig: originator ethernet address
...@@ -116,9 +135,6 @@ struct batadv_hard_iface { ...@@ -116,9 +135,6 @@ struct batadv_hard_iface {
* last_bcast_seqno) * last_bcast_seqno)
* @last_bcast_seqno: last broadcast sequence number received by this host * @last_bcast_seqno: last broadcast sequence number received by this host
* @neigh_list: list of potential next hop neighbor towards this orig node * @neigh_list: list of potential next hop neighbor towards this orig node
* @frag_list: fragmentation buffer list for fragment re-assembly
* @last_frag_packet: time when last fragmented packet from this node was
* received
* @neigh_list_lock: lock protecting neigh_list, router and bonding_list * @neigh_list_lock: lock protecting neigh_list, router and bonding_list
* @hash_entry: hlist node for batadv_priv::orig_hash * @hash_entry: hlist node for batadv_priv::orig_hash
* @bat_priv: pointer to soft_iface this orig node belongs to * @bat_priv: pointer to soft_iface this orig node belongs to
...@@ -133,6 +149,7 @@ struct batadv_hard_iface { ...@@ -133,6 +149,7 @@ struct batadv_hard_iface {
* @out_coding_list: list of nodes that can hear this orig * @out_coding_list: list of nodes that can hear this orig
* @in_coding_list_lock: protects in_coding_list * @in_coding_list_lock: protects in_coding_list
* @out_coding_list_lock: protects out_coding_list * @out_coding_list_lock: protects out_coding_list
* @fragments: array with heads for fragment chains
*/ */
struct batadv_orig_node { struct batadv_orig_node {
uint8_t orig[ETH_ALEN]; uint8_t orig[ETH_ALEN];
...@@ -159,8 +176,6 @@ struct batadv_orig_node { ...@@ -159,8 +176,6 @@ struct batadv_orig_node {
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
uint32_t last_bcast_seqno; uint32_t last_bcast_seqno;
struct hlist_head neigh_list; struct hlist_head neigh_list;
struct list_head frag_list;
unsigned long last_frag_packet;
/* neigh_list_lock protects: neigh_list, router & bonding_list */ /* neigh_list_lock protects: neigh_list, router & bonding_list */
spinlock_t neigh_list_lock; spinlock_t neigh_list_lock;
struct hlist_node hash_entry; struct hlist_node hash_entry;
...@@ -181,6 +196,7 @@ struct batadv_orig_node { ...@@ -181,6 +196,7 @@ struct batadv_orig_node {
spinlock_t in_coding_list_lock; /* Protects in_coding_list */ spinlock_t in_coding_list_lock; /* Protects in_coding_list */
spinlock_t out_coding_list_lock; /* Protects out_coding_list */ spinlock_t out_coding_list_lock; /* Protects out_coding_list */
#endif #endif
struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
}; };
/** /**
...@@ -277,6 +293,12 @@ struct batadv_bcast_duplist_entry { ...@@ -277,6 +293,12 @@ struct batadv_bcast_duplist_entry {
* @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
* @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
* @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
* @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
* @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
* @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
* @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
* @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
* @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
* @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
* @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
* @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
...@@ -314,6 +336,12 @@ enum batadv_counters { ...@@ -314,6 +336,12 @@ enum batadv_counters {
BATADV_CNT_MGMT_TX_BYTES, BATADV_CNT_MGMT_TX_BYTES,
BATADV_CNT_MGMT_RX, BATADV_CNT_MGMT_RX,
BATADV_CNT_MGMT_RX_BYTES, BATADV_CNT_MGMT_RX_BYTES,
BATADV_CNT_FRAG_TX,
BATADV_CNT_FRAG_TX_BYTES,
BATADV_CNT_FRAG_RX,
BATADV_CNT_FRAG_RX_BYTES,
BATADV_CNT_FRAG_FWD,
BATADV_CNT_FRAG_FWD_BYTES,
BATADV_CNT_TT_REQUEST_TX, BATADV_CNT_TT_REQUEST_TX,
BATADV_CNT_TT_REQUEST_RX, BATADV_CNT_TT_REQUEST_RX,
BATADV_CNT_TT_RESPONSE_TX, BATADV_CNT_TT_RESPONSE_TX,
...@@ -511,6 +539,7 @@ struct batadv_priv_nc { ...@@ -511,6 +539,7 @@ struct batadv_priv_nc {
* @aggregated_ogms: bool indicating whether OGM aggregation is enabled * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
* @bonding: bool indicating whether traffic bonding is enabled * @bonding: bool indicating whether traffic bonding is enabled
* @fragmentation: bool indicating whether traffic fragmentation is enabled * @fragmentation: bool indicating whether traffic fragmentation is enabled
* @frag_seqno: incremental counter to identify chains of egress fragments
* @ap_isolation: bool indicating whether ap isolation is enabled * @ap_isolation: bool indicating whether ap isolation is enabled
* @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
* enabled * enabled
...@@ -554,6 +583,7 @@ struct batadv_priv { ...@@ -554,6 +583,7 @@ struct batadv_priv {
atomic_t aggregated_ogms; atomic_t aggregated_ogms;
atomic_t bonding; atomic_t bonding;
atomic_t fragmentation; atomic_t fragmentation;
atomic_t frag_seqno;
atomic_t ap_isolation; atomic_t ap_isolation;
#ifdef CONFIG_BATMAN_ADV_BLA #ifdef CONFIG_BATMAN_ADV_BLA
atomic_t bridge_loop_avoidance; atomic_t bridge_loop_avoidance;
...@@ -873,18 +903,6 @@ struct batadv_forw_packet { ...@@ -873,18 +903,6 @@ struct batadv_forw_packet {
struct batadv_hard_iface *if_incoming; struct batadv_hard_iface *if_incoming;
}; };
/**
* struct batadv_frag_packet_list_entry - storage for fragment packet
* @list: list node for orig_node::frag_list
* @seqno: sequence number of the fragment
* @skb: fragment's skb buffer
*/
struct batadv_frag_packet_list_entry {
struct list_head list;
uint16_t seqno;
struct sk_buff *skb;
};
/** /**
* struct batadv_algo_ops - mesh algorithm callbacks * struct batadv_algo_ops - mesh algorithm callbacks
* @list: list node for the batadv_algo_list * @list: list node for the batadv_algo_list
......
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "main.h"
#include "unicast.h"
#include "send.h"
#include "soft-interface.h"
#include "gateway_client.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"
static struct sk_buff *
batadv_frag_merge_packet(struct list_head *head,
struct batadv_frag_packet_list_entry *tfp,
struct sk_buff *skb)
{
struct batadv_unicast_frag_packet *up;
struct sk_buff *tmp_skb;
struct batadv_unicast_packet *unicast_packet;
int hdr_len = sizeof(*unicast_packet);
int uni_diff = sizeof(*up) - hdr_len;
uint8_t *packet_pos;
up = (struct batadv_unicast_frag_packet *)skb->data;
/* set skb to the first part and tmp_skb to the second part */
if (up->flags & BATADV_UNI_FRAG_HEAD) {
tmp_skb = tfp->skb;
} else {
tmp_skb = skb;
skb = tfp->skb;
}
if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
goto err;
skb_pull(tmp_skb, sizeof(*up));
if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
goto err;
/* move free entry to end */
tfp->skb = NULL;
tfp->seqno = 0;
list_move_tail(&tfp->list, head);
memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
kfree_skb(tmp_skb);
memmove(skb->data + uni_diff, skb->data, hdr_len);
packet_pos = skb_pull(skb, uni_diff);
unicast_packet = (struct batadv_unicast_packet *)packet_pos;
unicast_packet->header.packet_type = BATADV_UNICAST;
return skb;
err:
/* free buffered skb, skb will be freed later */
kfree_skb(tfp->skb);
return NULL;
}
static void batadv_frag_create_entry(struct list_head *head,
struct sk_buff *skb)
{
struct batadv_frag_packet_list_entry *tfp;
struct batadv_unicast_frag_packet *up;
up = (struct batadv_unicast_frag_packet *)skb->data;
/* free entries and the oldest packets stand at the end */
tfp = list_entry((head)->prev, typeof(*tfp), list);
kfree_skb(tfp->skb);
tfp->seqno = ntohs(up->seqno);
tfp->skb = skb;
list_move(&tfp->list, head);
return;
}
static int batadv_frag_create_buffer(struct list_head *head)
{
int i;
struct batadv_frag_packet_list_entry *tfp;
for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
if (!tfp) {
batadv_frag_list_free(head);
return -ENOMEM;
}
tfp->skb = NULL;
tfp->seqno = 0;
INIT_LIST_HEAD(&tfp->list);
list_add(&tfp->list, head);
}
return 0;
}
static struct batadv_frag_packet_list_entry *
batadv_frag_search_packet(struct list_head *head,
const struct batadv_unicast_frag_packet *up)
{
struct batadv_frag_packet_list_entry *tfp;
struct batadv_unicast_frag_packet *tmp_up = NULL;
bool is_head_tmp, is_head;
uint16_t search_seqno;
if (up->flags & BATADV_UNI_FRAG_HEAD)
search_seqno = ntohs(up->seqno)+1;
else
search_seqno = ntohs(up->seqno)-1;
is_head = up->flags & BATADV_UNI_FRAG_HEAD;
list_for_each_entry(tfp, head, list) {
if (!tfp->skb)
continue;
if (tfp->seqno == ntohs(up->seqno))
goto mov_tail;
tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
if (is_head_tmp != is_head)
return tfp;
else
goto mov_tail;
}
}
return NULL;
mov_tail:
list_move_tail(&tfp->list, head);
return NULL;
}
void batadv_frag_list_free(struct list_head *head)
{
struct batadv_frag_packet_list_entry *pf, *tmp_pf;
if (!list_empty(head)) {
list_for_each_entry_safe(pf, tmp_pf, head, list) {
kfree_skb(pf->skb);
list_del(&pf->list);
kfree(pf);
}
}
return;
}
/* frag_reassemble_skb():
* returns NET_RX_DROP if the operation failed - skb is left intact
* returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
* or the skb could be reassembled (skb_new will point to the new packet and
* skb was freed)
*/
int batadv_frag_reassemble_skb(struct sk_buff *skb,
struct batadv_priv *bat_priv,
struct sk_buff **new_skb)
{
struct batadv_orig_node *orig_node;
struct batadv_frag_packet_list_entry *tmp_frag_entry;
int ret = NET_RX_DROP;
struct batadv_unicast_frag_packet *unicast_packet;
unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
*new_skb = NULL;
orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
if (!orig_node)
goto out;
orig_node->last_frag_packet = jiffies;
if (list_empty(&orig_node->frag_list) &&
batadv_frag_create_buffer(&orig_node->frag_list)) {
pr_debug("couldn't create frag buffer\n");
goto out;
}
tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
unicast_packet);
if (!tmp_frag_entry) {
batadv_frag_create_entry(&orig_node->frag_list, skb);
ret = NET_RX_SUCCESS;
goto out;
}
*new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
tmp_frag_entry, skb);
/* if new_skb is NULL, the merge failed */
if (*new_skb)
ret = NET_RX_SUCCESS;
out:
if (orig_node)
batadv_orig_node_free_ref(orig_node);
return ret;
}
int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface,
const uint8_t dstaddr[])
{
struct batadv_unicast_packet tmp_uc, *unicast_packet;
struct batadv_hard_iface *primary_if;
struct sk_buff *frag_skb;
struct batadv_unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(*unicast_packet);
int ucf_hdr_len = sizeof(*frag1);
int data_len = skb->len - uc_hdr_len;
int large_tail = 0, ret = NET_RX_DROP;
uint16_t seqno;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
if (!frag_skb)
goto dropped;
skb->priority = TC_PRIO_CONTROL;
skb_reserve(frag_skb, ucf_hdr_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
goto drop_frag;
frag1 = (struct batadv_unicast_frag_packet *)skb->data;
frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
frag1->header.ttl--;
frag1->header.version = BATADV_COMPAT_VERSION;
frag1->header.packet_type = BATADV_UNICAST_FRAG;
memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(*frag2));
if (data_len & 1)
large_tail = BATADV_UNI_FRAG_LARGETAIL;
frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
frag2->flags = large_tail;
seqno = atomic_add_return(2, &hard_iface->frag_seqno);
frag1->seqno = htons(seqno - 1);
frag2->seqno = htons(seqno);
batadv_send_skb_packet(skb, hard_iface, dstaddr);
batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
ret = NET_RX_SUCCESS;
goto out;
drop_frag:
kfree_skb(frag_skb);
dropped:
kfree_skb(skb);
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
return ret;
}
/**
* batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
* common fields for unicast packets
* @skb: packet
* @hdr_size: amount of bytes to push at the beginning of the skb
* @orig_node: the destination node
*
* Returns false if the buffer extension was not possible or true otherwise
*/
static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node)
{
struct batadv_unicast_packet *unicast_packet;
uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
if (batadv_skb_head_push(skb, hdr_size) < 0)
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->header.version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
unicast_packet->header.packet_type = BATADV_UNICAST;
/* set unicast ttl */
unicast_packet->header.ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
unicast_packet->ttvn = ttvn;
return true;
}
/**
* batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
* Returns false if the payload could not be encapsulated or true otherwise.
*
* This call might reallocate skb data.
*/
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
{
size_t uni_size = sizeof(struct batadv_unicast_packet);
return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
}
/**
* batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
* header
* @bat_priv: the bat priv with all the soft interface information
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
* @packet_subtype: the batman 4addr packet subtype to use
*
* Returns false if the payload could not be encapsulated or true otherwise.
*
* This call might reallocate skb data.
*/
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct batadv_orig_node *orig,
int packet_subtype)
{
struct batadv_hard_iface *primary_if;
struct batadv_unicast_4addr_packet *unicast_4addr_packet;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* pull the header space and fill the unicast_packet substructure.
* We can do that because the first member of the unicast_4addr_packet
* is of type struct unicast_packet
*/
if (!batadv_unicast_push_and_fill_skb(skb,
sizeof(*unicast_4addr_packet),
orig))
goto out;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
ETH_ALEN);
unicast_4addr_packet->subtype = packet_subtype;
unicast_4addr_packet->reserved = 0;
ret = true;
out:
if (primary_if)
batadv_hardif_free_ref(primary_if);
return ret;
}
/**
* batadv_unicast_generic_send_skb - send an skb as unicast
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
* @packet_subtype: the batman packet subtype. It is ignored if packet_type is
* not BATADV_UNICAST_4ADDR
*
* Returns 1 in case of error or 0 otherwise
*/
int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct batadv_unicast_packet *unicast_packet;
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = NET_RX_DROP;
unsigned int dev_mtu, header_len;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
orig_node = batadv_gw_get_selected_orig(bat_priv);
if (orig_node)
goto find_router;
}
/* check for tt host - increases orig_node refcount.
* returns NULL in case of AP isolation
*/
orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
find_router:
/* find_router():
* - if orig_node is NULL it returns NULL
* - increases neigh_nodes refcount if found.
*/
neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
if (!neigh_node)
goto out;
switch (packet_type) {
case BATADV_UNICAST:
if (!batadv_unicast_prepare_skb(skb, orig_node))
goto out;
header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
packet_subtype))
goto out;
header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
* should never be invoked with any other packet type
*/
goto out;
}
ethhdr = (struct ethhdr *)(skb->data + header_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
* for this client. The destination will receive this packet and will
* try to reroute it because the ttvn contained in the header is less
* than the current one
*/
if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
dev_mtu = neigh_node->if_incoming->net_dev->mtu;
/* fragmentation mechanism only works for UNICAST (now) */
if (packet_type == BATADV_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
data_len + sizeof(*unicast_packet) > dev_mtu) {
/* send frag skb decreases ttl */
unicast_packet->header.ttl++;
ret = batadv_frag_send_skb(skb, bat_priv,
neigh_node->if_incoming,
neigh_node->addr);
goto out;
}
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = 0;
out:
if (neigh_node)
batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
batadv_orig_node_free_ref(orig_node);
if (ret == NET_RX_DROP)
kfree_skb(skb);
return ret;
}
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#ifndef _NET_BATMAN_ADV_UNICAST_H_
#define _NET_BATMAN_ADV_UNICAST_H_
#include "packet.h"
#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
int batadv_frag_reassemble_skb(struct sk_buff *skb,
struct batadv_priv *bat_priv,
struct sk_buff **new_skb);
void batadv_frag_list_free(struct list_head *head);
int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface,
const uint8_t dstaddr[]);
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct batadv_orig_node *orig_node,
int packet_subtype);
int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype);
/**
* batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
*/
static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
0);
}
/**
* batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast 4addr packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
* @packet_subtype: the batman 4addr packet subtype to use
*/
static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
int packet_subtype)
{
return batadv_unicast_generic_send_skb(bat_priv, skb,
BATADV_UNICAST_4ADDR,
packet_subtype);
}
static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
{
const struct batadv_unicast_frag_packet *unicast_packet;
int uneven_correction = 0;
unsigned int merged_size;
unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
uneven_correction = 1;
else
uneven_correction = -1;
}
merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
return merged_size <= mtu;
}
#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
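As a cross-check of the merged-size arithmetic in the (now removed) batadv_frag_can_reassemble() above: the old sender split data_len payload bytes so the head carried data_len/2 bytes and the tail the remaining, larger half, marking both fragments with BATADV_UNI_FRAG_LARGETAIL when data_len was odd. The standalone sketch below reproduces the computation with a made-up header size (UNI_HDR is an assumption, not the real sizeof(struct batadv_unicast_packet)):

#include <stdbool.h>
#include <stdio.h>

/* Assumed illustrative size of the plain unicast header. */
#define UNI_HDR 10

/* Mirrors the merged-size computation of batadv_frag_can_reassemble():
 * frag_payload stands for skb->len minus the fragment header, large_tail
 * and is_head stand for the BATADV_UNI_FRAG_* flags.
 */
static bool can_reassemble(int frag_payload, bool large_tail, bool is_head,
                           int mtu)
{
        int correction = 0;
        int merged_size;

        if (large_tail)
                correction = is_head ? 1 : -1;

        merged_size = frag_payload * 2 + UNI_HDR + correction;
        return merged_size <= mtu;
}

int main(void)
{
        /* 1001 payload bytes were split into a 500 byte head and a 501
         * byte (large) tail; both fragments reconstruct 1001 + UNI_HDR.
         */
        printf("%d\n", can_reassemble(500, true, true, 1500));  /* 1 */
        printf("%d\n", can_reassemble(501, true, false, 1500)); /* 1 */
        /* an even split of 1500 payload bytes would not fit a 1500 MTU */
        printf("%d\n", can_reassemble(750, false, true, 1500)); /* 0: 1510 */
        return 0;
}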