Commit 8280687b authored by David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/network-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents eff95566 9aecd918
......@@ -587,7 +587,6 @@ static inline void dev_kfree_skb_any(struct sk_buff *skb)
dev_kfree_skb(skb);
}
extern void net_call_rx_atomic(void (*fn)(void));
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
......
......@@ -155,15 +155,15 @@ typedef struct sctp_paramhdr {
typedef enum {
/* RFC 2960 Section 3.3.5 */
SCTP_PARAM_HEATBEAT_INFO = __constant_htons(1),
SCTP_PARAM_HEARTBEAT_INFO = __constant_htons(1),
/* RFC 2960 Section 3.3.2.1 */
SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5),
SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6),
SCTP_PARAM_STATE_COOKIE = __constant_htons(7),
SCTP_PARAM_UNRECOGNIZED_PARAMETERS = __constant_htons(8),
SCTP_PARAM_COOKIE_PRESERVATIVE = __constant_htons(9),
SCTP_PARAM_HOST_NAME_ADDRESS = __constant_htons(11),
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12),
SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5),
SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6),
SCTP_PARAM_STATE_COOKIE = __constant_htons(7),
SCTP_PARAM_UNRECOGNIZED_PARAMETERS = __constant_htons(8),
SCTP_PARAM_COOKIE_PRESERVATIVE = __constant_htons(9),
SCTP_PARAM_HOST_NAME_ADDRESS = __constant_htons(11),
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12),
SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000),
/* Add-IP Extension. Section 3.2 */
......@@ -190,6 +190,7 @@ typedef enum {
SCTP_PARAM_ACTION_SKIP_ERR = __constant_htons(0xc000),
} sctp_param_action_t;
enum { SCTP_PARAM_ACTION_MASK = __constant_htons(0xc000), };
/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
......
......@@ -103,6 +103,14 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
/* Certain internal static functions need to be exported when
* compiled into the test frame.
*/
#ifndef SCTP_STATIC
#define SCTP_STATIC static
#endif
/*
* Function declarations.
*/
......
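A minimal sketch of how the new SCTP_STATIC macro is meant to be used; the test-frame override and the helper name below are assumptions for illustration, not part of this diff.

/* Hypothetical test-frame build: define SCTP_STATIC away before the SCTP
 * headers are pulled in, so the #ifndef above never fires and the
 * "static" helpers get external linkage for the test harness.
 */
#define SCTP_STATIC

SCTP_STATIC int sctp_some_internal_helper(void);   /* hypothetical helper */

/* In a normal kernel build SCTP_STATIC is not predefined, the header
 * falls back to "#define SCTP_STATIC static", and the same declaration
 * compiles to an ordinary static function.
 */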
......@@ -215,7 +215,8 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *,
int priority);
sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *,
const sctp_chunk_t *,
const int priority);
const int priority,
const int unkparam_len);
sctp_chunk_t *sctp_make_cookie_echo(const sctp_association_t *,
const sctp_chunk_t *);
sctp_chunk_t *sctp_make_cookie_ack(const sctp_association_t *,
......@@ -304,6 +305,14 @@ void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *);
sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
sctp_chunk_t *chunk,
const void *payload,
size_t paylen);
sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
const sctp_chunk_t *chunk);
void sctp_ootb_pkt_free(sctp_packet_t *packet);
sctp_cookie_param_t *
sctp_pack_cookie(const sctp_endpoint_t *, const sctp_association_t *,
......
......@@ -1044,6 +1044,20 @@ sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
const sockaddr_storage_t *);
int sctp_verify_init(const sctp_association_t *asoc,
sctp_cid_t cid,
sctp_init_chunk_t *peer_init,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
int sctp_verify_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_cid_t cid,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
int sctp_process_unk_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
void sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
const sockaddr_storage_t *peer_addr,
sctp_init_chunk_t *peer_init, int priority);
......
......@@ -21,6 +21,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/if_bridge.h>
#include <linux/brlock.h>
#include <asm/uaccess.h>
#include "br_private.h"
......@@ -55,11 +56,6 @@ static int __init br_init(void)
return 0;
}
static void __br_clear_frame_hook(void)
{
br_handle_frame_hook = NULL;
}
static void __br_clear_ioctl_hook(void)
{
br_ioctl_hook = NULL;
......@@ -69,7 +65,11 @@ static void __exit br_deinit(void)
{
unregister_netdevice_notifier(&br_device_notifier);
br_call_ioctl_atomic(__br_clear_ioctl_hook);
net_call_rx_atomic(__br_clear_frame_hook);
br_write_lock_bh(BR_NETPROTO_LOCK);
br_handle_frame_hook = NULL;
br_write_unlock_bh(BR_NETPROTO_LOCK);
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
br_fdb_get_hook = NULL;
br_fdb_put_hook = NULL;
......
......@@ -18,6 +18,7 @@
#include <linux/if_bridge.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/brlock.h>
#include <asm/uaccess.h>
#include "br_private.h"
......@@ -37,7 +38,7 @@ static int br_initial_port_cost(struct net_device *dev)
return 100;
}
/* called under bridge lock */
/* called under BR_NETPROTO_LOCK and bridge lock */
static int __br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
......@@ -86,10 +87,12 @@ static struct net_bridge **__find_br(char *name)
static void del_ifs(struct net_bridge *br)
{
write_lock_bh(&br->lock);
br_write_lock_bh(BR_NETPROTO_LOCK);
write_lock(&br->lock);
while (br->port_list != NULL)
__br_del_if(br, br->port_list->dev);
write_unlock_bh(&br->lock);
write_unlock(&br->lock);
br_write_unlock_bh(BR_NETPROTO_LOCK);
}
static struct net_bridge *new_nb(char *name)
......@@ -252,10 +255,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
{
int retval;
write_lock_bh(&br->lock);
br_write_lock_bh(BR_NETPROTO_LOCK);
write_lock(&br->lock);
retval = __br_del_if(br, dev);
br_stp_recalculate_bridge_id(br);
write_unlock_bh(&br->lock);
write_unlock(&br->lock);
br_write_unlock_bh(BR_NETPROTO_LOCK);
return retval;
}
......
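A note on the locking pattern these br_if.c hunks introduce; the rationale below is inferred from the lock primitives rather than stated in the diff.

/* The outer BR_NETPROTO_LOCK is taken with the _bh variant, which already
 * disables bottom halves, so the inner per-bridge lock drops its own _bh
 * suffix; unlocking mirrors the locking order.
 */
br_write_lock_bh(BR_NETPROTO_LOCK);     /* keeps the packet input path out   */
write_lock(&br->lock);                  /* plain write_lock: BHs already off */
/* ... __br_del_if(br, dev) ... */
write_unlock(&br->lock);
br_write_unlock_bh(BR_NETPROTO_LOCK);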
......@@ -369,10 +369,19 @@ static void br_make_blocking(struct net_bridge_port *p)
static void br_make_forwarding(struct net_bridge_port *p)
{
if (p->state == BR_STATE_BLOCKING) {
printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
p->br->dev.name, p->port_no, p->dev->name, "listening");
p->state = BR_STATE_LISTENING;
if (p->br->stp_enabled) {
printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
p->br->dev.name, p->port_no, p->dev->name,
"listening");
p->state = BR_STATE_LISTENING;
} else {
printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
p->br->dev.name, p->port_no, p->dev->name,
"learning");
p->state = BR_STATE_LEARNING;
}
br_timer_set(&p->forward_delay_timer, jiffies);
}
}
......
......@@ -1374,20 +1374,6 @@ static void net_tx_action(struct softirq_action *h)
}
}
/**
* net_call_rx_atomic
* @fn: function to call
*
* Make a function call that is atomic with respect to the protocol
* layers.
*/
void net_call_rx_atomic(void (*fn)(void))
{
br_write_lock_bh(BR_NETPROTO_LOCK);
fn();
br_write_unlock_bh(BR_NETPROTO_LOCK);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
#endif
......
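For context, a sketch of what replaces the helper removed here: instead of handing a callback to net_call_rx_atomic(), the bridge (see the br_deinit hunk above) now takes the same lock directly.

/* Old pattern, via the removed helper:
 *         net_call_rx_atomic(__br_clear_frame_hook);
 * New pattern, open-coded in the caller:
 */
br_write_lock_bh(BR_NETPROTO_LOCK);
br_handle_frame_hook = NULL;
br_write_unlock_bh(BR_NETPROTO_LOCK);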
......@@ -984,16 +984,16 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
}
static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
[4] = { doit: inet_rtm_newaddr, },
[5] = { doit: inet_rtm_deladdr, },
[6] = { dumpit: inet_dump_ifaddr, },
[8] = { doit: inet_rtm_newroute, },
[9] = { doit: inet_rtm_delroute, },
[10] = { doit: inet_rtm_getroute, dumpit: inet_dump_fib, },
[4] = { .doit = inet_rtm_newaddr, },
[5] = { .doit = inet_rtm_deladdr, },
[6] = { .dumpit = inet_dump_ifaddr, },
[8] = { .doit = inet_rtm_newroute, },
[9] = { .doit = inet_rtm_delroute, },
[10] = { .doit = inet_rtm_getroute, .dumpit = inet_dump_fib, },
#ifdef CONFIG_IP_MULTIPLE_TABLES
[16] = { doit: inet_rtm_newrule, },
[17] = { doit: inet_rtm_delrule, },
[18] = { dumpit: inet_dump_rules, },
[16] = { .doit = inet_rtm_newrule, },
[17] = { .doit = inet_rtm_delrule, },
[18] = { .dumpit = inet_dump_rules, },
#endif
};
......
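Several hunks in this merge (here and in the tunnel, TCP and IPv6 files below) convert GNU-style labeled initializers to ISO C99 designated initializers. A standalone illustration with a made-up struct:

struct ops {                     /* illustrative type, not from the diff */
	int (*doit)(void);
	int (*dumpit)(void);
};

static int noop(void) { return 0; }

/* GNU extension spelling, being phased out (gcc-only): */
static struct ops old_style = { doit: noop, dumpit: noop };

/* ISO C99 designated initializers, the form this merge converts to: */
static struct ops new_style = { .doit = noop, .dumpit = noop };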
......@@ -229,7 +229,7 @@ static int igmp_send_report(struct net_device *dev, u32 group, int type)
iph->version = 4;
iph->ihl = (sizeof(struct iphdr)+4)>>2;
iph->tos = 0;
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->daddr = dst;
iph->saddr = rt->rt_src;
......
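Most of the remaining hunks replace __constant_htons()/__constant_htonl() with plain htons()/htonl(). A brief sketch of why the shorter spelling is equivalent for constant arguments (based on the kernel's byteorder macros, which select a constant-folding path via __builtin_constant_p):

/* With a compile-time constant argument, htons()/htonl() reduce to the
 * same constant-swapped value as the __constant_* forms, so lines like
 * these generate identical code and simply read more naturally.
 */
iph->frag_off = htons(IP_DF);
skb->protocol = htons(ETH_P_IP);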
......@@ -126,7 +126,7 @@ static struct net_device ipgre_fb_tunnel_dev = {
static struct ip_tunnel ipgre_fb_tunnel = {
.dev = &ipgre_fb_tunnel_dev,
.parms ={ name: "gre0" }
.parms ={ .name = "gre0" }
};
/* Tunnel hash table */
......@@ -414,7 +414,7 @@ void ipgre_err(struct sk_buff *skb, u32 info)
struct sk_buff *skb2;
struct rtable *rt;
if (p[1] != __constant_htons(ETH_P_IP))
if (p[1] != htons(ETH_P_IP))
return;
flags = p[0];
......@@ -537,10 +537,10 @@ void ipgre_err(struct sk_buff *skb, u32 info)
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
if (INET_ECN_is_ce(iph->tos)) {
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
if (INET_ECN_is_not_ce(skb->nh.iph->tos))
IP_ECN_set_ce(skb->nh.iph);
} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
} else if (skb->protocol == htons(ETH_P_IPV6)) {
if (INET_ECN_is_not_ce(ip6_get_dsfield(skb->nh.ipv6h)))
IP6_ECN_set_ce(skb->nh.ipv6h);
}
......@@ -551,9 +551,9 @@ static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
u8 inner = 0;
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
inner = old_iph->tos;
else if (skb->protocol == __constant_htons(ETH_P_IPV6))
else if (skb->protocol == htons(ETH_P_IPV6))
inner = ip6_get_dsfield((struct ipv6hdr*)old_iph);
return INET_ECN_encapsulate(tos, inner);
}
......@@ -710,13 +710,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
rt = (struct rtable*)skb->dst;
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
int addr_type;
struct neighbour *neigh = skb->dst->neighbour;
......@@ -744,7 +744,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tos = tiph->tos;
if (tos&1) {
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
tos = old_iph->tos;
tos &= ~1;
}
......@@ -767,13 +767,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
else
mtu = skb->dst ? skb->dst->pmtu : dev->mtu;
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
if (skb->dst && mtu < skb->dst->pmtu && mtu >= 68)
skb->dst->pmtu = mtu;
df |= (old_iph->frag_off&__constant_htons(IP_DF));
df |= (old_iph->frag_off&htons(IP_DF));
if ((old_iph->frag_off&__constant_htons(IP_DF)) &&
if ((old_iph->frag_off&htons(IP_DF)) &&
mtu < ntohs(old_iph->tot_len)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
......@@ -781,7 +781,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
if (rt6 && mtu < rt6->u.dst.pmtu && mtu >= IPV6_MIN_MTU) {
......@@ -847,10 +847,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->saddr = rt->rt_src;
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6))
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
#endif
else
......@@ -938,11 +938,11 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)) ||
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
if (!(p.i_flags&GRE_KEY))
p.i_key = 0;
......
......@@ -136,7 +136,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
......@@ -187,7 +187,7 @@ __inline__ int ip_finish_output(struct sk_buff *skb)
struct net_device *dev = skb->dst->dev;
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
ip_finish_output2);
......@@ -209,7 +209,7 @@ int ip_mc_output(struct sk_buff *skb)
#endif
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
/*
* Multicasts are looped back for other local users
......@@ -394,7 +394,7 @@ int ip_queue_xmit(struct sk_buff *skb)
*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
iph->tot_len = htons(skb->len);
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
......@@ -463,7 +463,7 @@ static int ip_build_xmit_slow(struct sock *sk,
mtu = rt->u.dst.pmtu;
if (ip_dont_fragment(sk, &rt->u.dst))
df = __constant_htons(IP_DF);
df = htons(IP_DF);
length -= sizeof(struct iphdr);
......@@ -594,7 +594,7 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Any further fragments will have MF set.
*/
mf = __constant_htons(IP_MF);
mf = htons(IP_MF);
}
if (rt->rt_type == RTN_MULTICAST)
iph->ttl = inet->mc_ttl;
......@@ -693,7 +693,7 @@ int ip_build_xmit(struct sock *sk,
*/
df = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
df = __constant_htons(IP_DF);
df = htons(IP_DF);
/*
* Fast path for unfragmented frames without options.
......@@ -797,7 +797,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & __constant_htons(IP_MF);
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
......@@ -882,7 +882,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
* last fragment then keep MF on each bit
*/
if (left > 0 || not_last_frag)
iph->frag_off |= __constant_htons(IP_MF);
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
......
......@@ -362,11 +362,11 @@ static int __init ic_defaults(void)
if (ic_netmask == INADDR_NONE) {
if (IN_CLASSA(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSA_NET);
ic_netmask = htonl(IN_CLASSA_NET);
else if (IN_CLASSB(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSB_NET);
ic_netmask = htonl(IN_CLASSB_NET);
else if (IN_CLASSC(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSC_NET);
ic_netmask = htonl(IN_CLASSC_NET);
else {
printk(KERN_ERR "IP-Config: Unable to guess netmask for address %u.%u.%u.%u\n",
NIPQUAD(ic_myaddr));
......@@ -432,11 +432,11 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
goto drop;
/* If it's not a RARP reply, delete it. */
if (rarp->ar_op != __constant_htons(ARPOP_RREPLY))
if (rarp->ar_op != htons(ARPOP_RREPLY))
goto drop;
/* If it's not Ethernet, delete it. */
if (rarp->ar_pro != __constant_htons(ETH_P_IP))
if (rarp->ar_pro != htons(ETH_P_IP))
goto drop;
/* Extract variable-width fields */
......@@ -672,15 +672,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
h->version = 4;
h->ihl = 5;
h->tot_len = htons(sizeof(struct bootp_pkt));
h->frag_off = __constant_htons(IP_DF);
h->frag_off = htons(IP_DF);
h->ttl = 64;
h->protocol = IPPROTO_UDP;
h->daddr = INADDR_BROADCAST;
h->check = ip_fast_csum((unsigned char *) h, h->ihl);
/* Construct UDP header */
b->udph.source = __constant_htons(68);
b->udph.dest = __constant_htons(67);
b->udph.source = htons(68);
b->udph.dest = htons(67);
b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
/* UDP checksum not calculated -- explicitly allowed in BOOTP RFC */
......@@ -711,7 +711,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
/* Chain packet down the line... */
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
if ((dev->hard_header &&
dev->hard_header(skb, dev, ntohs(skb->protocol), dev->broadcast, dev->dev_addr, skb->len) < 0) ||
dev_queue_xmit(skb) < 0)
......@@ -819,13 +819,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
ip_fast_csum((char *) h, h->ihl) != 0 ||
skb->len < ntohs(h->tot_len) ||
h->protocol != IPPROTO_UDP ||
b->udph.source != __constant_htons(67) ||
b->udph.dest != __constant_htons(68) ||
b->udph.source != htons(67) ||
b->udph.dest != htons(68) ||
ntohs(h->tot_len) < ntohs(b->udph.len) + sizeof(struct iphdr))
goto drop;
/* Fragments are not supported */
if (h->frag_off & __constant_htons(IP_OFFSET | IP_MF)) {
if (h->frag_off & htons(IP_OFFSET | IP_MF)) {
printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented reply.\n");
goto drop;
}
......
......@@ -129,7 +129,7 @@ static struct net_device ipip_fb_tunnel_dev = {
static struct ip_tunnel ipip_fb_tunnel = {
.dev = &ipip_fb_tunnel_dev,
.parms ={ name: "tunl0", }
.parms ={ .name = "tunl0", }
};
static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
......@@ -483,7 +483,7 @@ int ipip_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
read_lock(&ipip_lock);
......@@ -544,7 +544,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol != __constant_htons(ETH_P_IP))
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
if (tos&1)
......@@ -585,9 +585,9 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->dst && mtu < skb->dst->pmtu)
skb->dst->pmtu = mtu;
df |= (old_iph->frag_off&__constant_htons(IP_DF));
df |= (old_iph->frag_off&htons(IP_DF));
if ((old_iph->frag_off&__constant_htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
goto tx_error;
......@@ -703,10 +703,10 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
......
......@@ -1434,7 +1434,7 @@ int pim_rcv_v1(struct sk_buff * skb)
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
......@@ -1501,7 +1501,7 @@ int pim_rcv(struct sk_buff * skb)
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
......
......@@ -804,7 +804,7 @@ do_bindings(struct ip_conntrack *ct,
/* Always defragged for helpers */
IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
& __constant_htons(IP_MF|IP_OFFSET)));
& htons(IP_MF|IP_OFFSET)));
/* Have to grab read lock before sibling_list traversal */
READ_LOCK(&ip_conntrack_lock);
......
......@@ -1260,9 +1260,9 @@ static unsigned int nat_help(struct ip_conntrack *ct,
* on post routing (SNAT).
*/
if (!((dir == IP_CT_DIR_REPLY && hooknum == NF_IP_PRE_ROUTING &&
udph->source == __constant_ntohs(SNMP_PORT)) ||
udph->source == ntohs(SNMP_PORT)) ||
(dir == IP_CT_DIR_ORIGINAL && hooknum == NF_IP_POST_ROUTING &&
udph->dest == __constant_ntohs(SNMP_TRAP_PORT)))) {
udph->dest == ntohs(SNMP_TRAP_PORT)))) {
spin_unlock_bh(&snmp_lock);
return NF_ACCEPT;
}
......
......@@ -75,7 +75,7 @@ ip_nat_fn(unsigned int hooknum,
/* We never see fragments: conntrack defrags on pre-routing
and local-out, and ip_nat_out protects post-routing. */
IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
& __constant_htons(IP_MF|IP_OFFSET)));
& htons(IP_MF|IP_OFFSET)));
(*pskb)->nfcache |= NFC_UNKNOWN;
......@@ -186,7 +186,7 @@ ip_nat_out(unsigned int hooknum,
I'm starting to have nightmares about fragments. */
if ((*pskb)->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
*pskb = ip_ct_gather_frags(*pskb);
if (!*pskb)
......
......@@ -1244,7 +1244,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
return -EINVAL;
if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
skb->protocol != __constant_htons(ETH_P_IP))
skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (ZERONET(saddr)) {
......@@ -1455,7 +1455,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(res))))
flags |= RTCF_DOREDIRECT;
if (skb->protocol != __constant_htons(ETH_P_IP)) {
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*/
......@@ -1520,7 +1520,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
out: return err;
brd_input:
if (skb->protocol != __constant_htons(ETH_P_IP))
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (ZERONET(saddr))
......@@ -2154,7 +2154,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
err = -ENODEV;
if (!dev)
goto out;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
local_bh_disable();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
......
......@@ -360,7 +360,7 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
break;
if (sk->family == AF_INET6 && cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == __constant_htonl(0xffff) &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr+3, cond->addr, cond->prefix_len))
break;
}
......
......@@ -2103,8 +2103,8 @@ static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr
} else if (tp->tstamp_ok &&
th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
__u32 *ptr = (__u32 *)(th + 1);
if (*ptr == __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
tp->saw_tstamp = 1;
++ptr;
tp->rcv_tsval = ntohl(*ptr);
......@@ -3275,8 +3275,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
__u32 *ptr = (__u32 *)(th + 1);
/* No? Slow path! */
if (*ptr != __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
goto slow_path;
tp->saw_tstamp = 1;
......
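The 32-bit constant compared against *ptr above is the "timestamp only" option block predicted by the TCP fast path. A sketch of what it packs, using the standard TCP option kinds:

/* The aligned timestamp option starts with the four bytes NOP (1),
 * NOP (1), TIMESTAMP (8), length (10), followed by the two 32-bit
 * timestamp values; matching one word of the option area against this
 * constant is how header prediction recognizes the common case.
 */
__u32 predicted = ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);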
......@@ -81,8 +81,8 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
.__tcp_lhash_lock = RW_LOCK_UNLOCKED,
.__tcp_lhash_users = ATOMIC_INIT(0),
__tcp_lhash_wait:
__WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
.__tcp_lhash_wait
= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
.__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
};
......
......@@ -428,7 +428,7 @@ static void tcp_twkill(unsigned long);
static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = { function: tcp_twkill };
static struct timer_list tcp_tw_timer = { .function = tcp_twkill };
static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
{
......@@ -495,7 +495,7 @@ void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer = {function: tcp_twcal_tick};
static struct timer_list tcp_twcal_timer = {.function = tcp_twcal_tick};
static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
......
......@@ -94,7 +94,7 @@ rwlock_t addrconf_lock = RW_LOCK_UNLOCKED;
static void addrconf_verify(unsigned long);
static struct timer_list addr_chk_timer = { function: addrconf_verify };
static struct timer_list addr_chk_timer = { .function = addrconf_verify };
static spinlock_t addrconf_verify_lock = SPIN_LOCK_UNLOCKED;
static int addrconf_ifdown(struct net_device *dev, int how);
......@@ -144,47 +144,47 @@ int ipv6_addr_type(struct in6_addr *addr)
/* Consider all addresses with the first three bits different of
000 and 111 as unicasts.
*/
if ((st & __constant_htonl(0xE0000000)) != __constant_htonl(0x00000000) &&
(st & __constant_htonl(0xE0000000)) != __constant_htonl(0xE0000000))
if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
(st & htonl(0xE0000000)) != htonl(0xE0000000))
return IPV6_ADDR_UNICAST;
if ((st & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000)) {
if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
int type = IPV6_ADDR_MULTICAST;
switch((st & __constant_htonl(0x00FF0000))) {
case __constant_htonl(0x00010000):
switch((st & htonl(0x00FF0000))) {
case htonl(0x00010000):
type |= IPV6_ADDR_LOOPBACK;
break;
case __constant_htonl(0x00020000):
case htonl(0x00020000):
type |= IPV6_ADDR_LINKLOCAL;
break;
case __constant_htonl(0x00050000):
case htonl(0x00050000):
type |= IPV6_ADDR_SITELOCAL;
break;
};
return type;
}
if ((st & __constant_htonl(0xFFC00000)) == __constant_htonl(0xFE800000))
if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST);
if ((st & __constant_htonl(0xFFC00000)) == __constant_htonl(0xFEC00000))
if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST);
if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
if (addr->s6_addr32[2] == 0) {
if (addr->in6_u.u6_addr32[3] == 0)
if (addr->s6_addr32[3] == 0)
return IPV6_ADDR_ANY;
if (addr->s6_addr32[3] == __constant_htonl(0x00000001))
if (addr->s6_addr32[3] == htonl(0x00000001))
return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST);
return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST);
}
if (addr->s6_addr32[2] == __constant_htonl(0x0000ffff))
if (addr->s6_addr32[2] == htonl(0x0000ffff))
return IPV6_ADDR_MAPPED;
}
......@@ -755,7 +755,7 @@ static void addrconf_add_mroute(struct net_device *dev)
memset(&rtmsg, 0, sizeof(rtmsg));
ipv6_addr_set(&rtmsg.rtmsg_dst,
__constant_htonl(0xFF000000), 0, 0, 0);
htonl(0xFF000000), 0, 0, 0);
rtmsg.rtmsg_dst_len = 8;
rtmsg.rtmsg_metric = IP6_RT_PRIO_ADDRCONF;
rtmsg.rtmsg_ifindex = dev->ifindex;
......@@ -785,7 +785,7 @@ static void addrconf_add_lroute(struct net_device *dev)
{
struct in6_addr addr;
ipv6_addr_set(&addr, __constant_htonl(0xFE800000), 0, 0, 0);
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
addrconf_prefix_route(&addr, 10, dev, 0, RTF_ADDRCONF);
}
......@@ -1123,7 +1123,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
if (idev->dev->flags&IFF_POINTOPOINT) {
addr.s6_addr32[0] = __constant_htonl(0xfe800000);
addr.s6_addr32[0] = htonl(0xfe800000);
scope = IFA_LINK;
} else {
scope = IPV6_ADDR_COMPATv4;
......@@ -1237,9 +1237,7 @@ static void addrconf_dev_config(struct net_device *dev)
return;
memset(&addr, 0, sizeof(struct in6_addr));
addr.s6_addr[0] = 0xFE;
addr.s6_addr[1] = 0x80;
addr.s6_addr32[0] = htonl(0xFE800000);
if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
addrconf_add_linklocal(idev, &addr);
......
......@@ -150,7 +150,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
}
} else {
ipv6_addr_set(&sin->sin6_addr, 0, 0,
__constant_htonl(0xffff),
htonl(0xffff),
*(u32*)(skb->nh.raw + serr->addr_offset));
}
}
......@@ -173,7 +173,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
struct inet_opt *inet = inet_sk(sk);
ipv6_addr_set(&sin->sin6_addr, 0, 0,
__constant_htonl(0xffff),
htonl(0xffff),
skb->nh.iph->saddr);
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
......
......@@ -198,7 +198,7 @@ static int is_ineligible(struct sk_buff *skb)
u8 type;
if (skb_copy_bits(skb, ptr+offsetof(struct icmp6hdr, icmp6_type),
&type, 1)
|| !(type & 0x80))
|| !(type & ICMPV6_INFOMSG_MASK))
return 1;
}
return 0;
......@@ -216,7 +216,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
int res = 0;
/* Informational messages are not limited. */
if (type & 0x80)
if (type & ICMPV6_INFOMSG_MASK)
return 1;
/* Do not limit pmtu discovery, it would break it. */
......@@ -519,22 +519,22 @@ static int icmpv6_rcv(struct sk_buff *skb)
skb_checksum(skb, 0, skb->len, 0))) {
if (net_ratelimit())
printk(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
ntohs(saddr->in6_u.u6_addr16[0]),
ntohs(saddr->in6_u.u6_addr16[1]),
ntohs(saddr->in6_u.u6_addr16[2]),
ntohs(saddr->in6_u.u6_addr16[3]),
ntohs(saddr->in6_u.u6_addr16[4]),
ntohs(saddr->in6_u.u6_addr16[5]),
ntohs(saddr->in6_u.u6_addr16[6]),
ntohs(saddr->in6_u.u6_addr16[7]),
ntohs(daddr->in6_u.u6_addr16[0]),
ntohs(daddr->in6_u.u6_addr16[1]),
ntohs(daddr->in6_u.u6_addr16[2]),
ntohs(daddr->in6_u.u6_addr16[3]),
ntohs(daddr->in6_u.u6_addr16[4]),
ntohs(daddr->in6_u.u6_addr16[5]),
ntohs(daddr->in6_u.u6_addr16[6]),
ntohs(daddr->in6_u.u6_addr16[7]));
ntohs(saddr->s6_addr16[0]),
ntohs(saddr->s6_addr16[1]),
ntohs(saddr->s6_addr16[2]),
ntohs(saddr->s6_addr16[3]),
ntohs(saddr->s6_addr16[4]),
ntohs(saddr->s6_addr16[5]),
ntohs(saddr->s6_addr16[6]),
ntohs(saddr->s6_addr16[7]),
ntohs(daddr->s6_addr16[0]),
ntohs(daddr->s6_addr16[1]),
ntohs(daddr->s6_addr16[2]),
ntohs(daddr->s6_addr16[3]),
ntohs(daddr->s6_addr16[4]),
ntohs(daddr->s6_addr16[5]),
ntohs(daddr->s6_addr16[6]),
ntohs(daddr->s6_addr16[7]));
goto discard_it;
}
}
......@@ -613,7 +613,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
printk(KERN_DEBUG "icmpv6: msg of unkown type\n");
/* informational */
if (type & 0x80)
if (type & ICMPV6_INFOMSG_MASK)
break;
/*
......
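The "type & 0x80" test replaced above relies on the ICMPv6 type-space split; a short sketch of the named form (ICMPV6_INFOMSG_MASK is 0x80 in the ICMPv6 header):

/* ICMPv6 types 0..127 are error messages and 128..255 are informational
 * messages, so testing the high bit of the type answers "is this
 * informational?"; the named mask keeps the behaviour and states the
 * intent.
 */
int informational = (type & ICMPV6_INFOMSG_MASK) != 0;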
......@@ -93,7 +93,7 @@ static struct fib6_node * fib6_repair_tree(struct fib6_node *fn);
static __u32 rt_sernum = 0;
static struct timer_list ip6_fib_timer = { function: fib6_run_gc };
static struct timer_list ip6_fib_timer = { .function = fib6_run_gc };
static struct fib6_walker_t fib6_walker_list = {
&fib6_walker_list, &fib6_walker_list,
......
......@@ -102,7 +102,7 @@ int ip6_output(struct sk_buff *skb)
struct dst_entry *dst = skb->dst;
struct net_device *dev = dst->dev;
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
......@@ -223,7 +223,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
* Fill in the IPv6 header
*/
*(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
*(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel;
hlimit = -1;
if (np)
hlimit = np->hop_limit;
......@@ -264,7 +264,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
struct ipv6hdr *hdr;
int totlen;
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
totlen = len + sizeof(struct ipv6hdr);
......
......@@ -408,14 +408,8 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
*/
if (e->info->hook == NF_IP_LOCAL_OUT) {
struct ipv6hdr *iph = e->skb->nh.ipv6h;
if (!( iph->daddr.in6_u.u6_addr32[0] == e->rt_info.daddr.in6_u.u6_addr32[0]
&& iph->daddr.in6_u.u6_addr32[1] == e->rt_info.daddr.in6_u.u6_addr32[1]
&& iph->daddr.in6_u.u6_addr32[2] == e->rt_info.daddr.in6_u.u6_addr32[2]
&& iph->daddr.in6_u.u6_addr32[3] == e->rt_info.daddr.in6_u.u6_addr32[3]
&& iph->saddr.in6_u.u6_addr32[0] == e->rt_info.saddr.in6_u.u6_addr32[0]
&& iph->saddr.in6_u.u6_addr32[1] == e->rt_info.saddr.in6_u.u6_addr32[1]
&& iph->saddr.in6_u.u6_addr32[2] == e->rt_info.saddr.in6_u.u6_addr32[2]
&& iph->saddr.in6_u.u6_addr32[3] == e->rt_info.saddr.in6_u.u6_addr32[3]))
if (ipv6_addr_cmp(&iph->daddr, &e->rt_info.daddr) ||
ipv6_addr_cmp(&iph->saddr, &e->rt_info.saddr))
return route6_me_harder(e->skb);
}
return 0;
......
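The word-by-word address comparison above collapses into ipv6_addr_cmp(), which has memcmp()-style semantics (0 means equal), so the rewritten test reads "re-route if either address was rewritten while the packet was queued":

/* Re-route the locally generated packet if the verdict handler changed
 * its source or destination address while it sat in the queue.
 */
if (ipv6_addr_cmp(&iph->daddr, &e->rt_info.daddr) ||
    ipv6_addr_cmp(&iph->saddr, &e->rt_info.saddr))
	return route6_me_harder(e->skb);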
......@@ -112,7 +112,7 @@ static void dump_packet(const struct ip6t_log_info *info,
printk("FRAG:%u ", ntohs(fhdr->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fhdr->frag_off & __constant_htons(0x0001))
if (fhdr->frag_off & htons(0x0001))
printk("INCOMPLETE ");
printk("ID:%08x ", fhdr->identification);
......
......@@ -372,7 +372,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));
/* Is this the final fragment? */
if (!(fhdr->frag_off & __constant_htons(0x0001))) {
if (!(fhdr->frag_off & htons(0x0001))) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
......@@ -648,7 +648,7 @@ int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
hdr = skb->nh.ipv6h;
fhdr = (struct frag_hdr *)skb->h.raw;
if (!(fhdr->frag_off & __constant_htons(0xFFF9))) {
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->h.raw += sizeof(struct frag_hdr);
IP6_INC_STATS_BH(Ip6ReasmOKs);
......
......@@ -396,7 +396,7 @@ static int ipip6_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
tunnel->stat.rx_packets++;
tunnel->stat.rx_bytes += skb->len;
......@@ -470,7 +470,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol != __constant_htons(ETH_P_IPV6))
if (skb->protocol != htons(ETH_P_IPV6))
goto tx_error;
if (!dst)
......@@ -588,7 +588,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
if (mtu > IPV6_MIN_MTU)
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
......@@ -659,10 +659,10 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
......
......@@ -424,11 +424,11 @@ static int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
struct inet_opt *inet = inet_sk(sk);
ipv6_addr_set(&sin6->sin6_addr, 0, 0,
__constant_htonl(0xffff), skb->nh.iph->saddr);
htonl(0xffff), skb->nh.iph->saddr);
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
......
......@@ -587,7 +587,6 @@ EXPORT_SYMBOL(ip_route_me_harder);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(net_call_rx_atomic);
EXPORT_SYMBOL(softnet_data);
#if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
......
......@@ -97,7 +97,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
/* These are printable form of variable-length parameters. */
const char *sctp_param_tbl[SCTP_PARAM_ECN_CAPABLE + 1] = {
"",
"PARAM_HEATBEAT_INFO",
"PARAM_HEARTBEAT_INFO",
"",
"",
"",
......
......@@ -104,7 +104,7 @@ void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
void sctp_outqueue_teardown(sctp_outqueue_t *q)
{
sctp_transport_t *transport;
struct list_head *lchunk, *pos;
struct list_head *lchunk, *pos, *temp;
sctp_chunk_t *chunk;
/* Throw away unacknowledged chunks. */
......@@ -117,6 +117,13 @@ void sctp_outqueue_teardown(sctp_outqueue_t *q)
}
}
/* Throw away chunks that have been gap ACKed. */
list_for_each_safe(lchunk, temp, &q->sacked) {
list_del(lchunk);
chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
sctp_free_chunk(chunk);
}
/* Throw away any leftover chunks. */
while ((chunk = (sctp_chunk_t *) skb_dequeue(&q->out)))
sctp_free_chunk(chunk);
......
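A note on the iterator used by the new teardown code; this is generic list-API behaviour rather than anything specific to this diff.

/* list_for_each_safe() carries a look-ahead pointer ("temp"), which is
 * what makes it legal to list_del() the current entry inside the loop;
 * plain list_for_each() would step through the just-freed node.
 */
list_for_each_safe(lchunk, temp, &q->sacked) {
	list_del(lchunk);
	chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
	sctp_free_chunk(chunk);
}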
......@@ -223,7 +223,7 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
int priority)
int priority, int unkparam_len)
{
sctp_inithdr_t initack;
sctp_chunk_t *retval;
......@@ -278,7 +278,10 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
if (!cookie)
goto nomem_cookie;
chunksize = sizeof(initack) + addrs_len + cookie_len;
/* Calculate the total size of allocation, include the reserved
* space for reporting unknown parameters if it is specified.
*/
chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
/* Tell peer that we'll do ECN only if peer advertised such cap. */
if (asoc->peer.ecn_capable)
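A usage sketch for the unkparam_len argument added to sctp_make_init_ack(); the length computation below is an assumption for illustration (the call site changed later in this merge still passes 0).

/* Reserve room in the INIT ACK for echoing back unrecognized parameters
 * collected by sctp_verify_init()/sctp_process_unk_param() elsewhere in
 * this merge; err_chunk may be NULL if everything was recognized.
 */
int unkparam_len = err_chunk ? ntohs(err_chunk->chunk_hdr->length) : 0;  /* assumed */
reply = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, unkparam_len);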
......@@ -883,25 +886,27 @@ sctp_chunk_t *sctp_make_heartbeat_ack(const sctp_association_t *asoc,
return retval;
}
/* Create an Operation Error chunk. */
sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
__u16 cause_code, const void *payload,
size_t paylen)
/* Create an Operation Error chunk with the specified space reserved.
* This routine can be used for containing multiple causes in the chunk.
*/
sctp_chunk_t *sctp_make_op_error_space(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
size_t size)
{
sctp_chunk_t *retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
sizeof(sctp_errhdr_t) + paylen);
sctp_chunk_t *retval;
retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
sizeof(sctp_errhdr_t) + size);
if (!retval)
goto nodata;
sctp_init_cause(retval, cause_code, payload, paylen);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
* An endpoint SHOULD transmit reply chunks (e.g., SACK,
* HEARTBEAT ACK, * etc.) to the same destination transport
* address from which it * received the DATA or control chunk
* HEARTBEAT ACK, etc.) to the same destination transport
* address from which it received the DATA or control chunk
* to which it is replying.
*
*/
if (chunk)
retval->transport = chunk->transport;
......@@ -910,6 +915,23 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
return retval;
}
/* Create an Operation Error chunk. */
sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
__u16 cause_code, const void *payload,
size_t paylen)
{
sctp_chunk_t *retval = sctp_make_op_error_space(asoc, chunk, paylen);
if (!retval)
goto nodata;
sctp_init_cause(retval, cause_code, payload, paylen);
nodata:
return retval;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
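A sketch of the pattern sctp_make_op_error_space() enables: allocate one ERROR chunk sized for several causes, then append each cause with sctp_init_cause(). The total_cause_len value is assumed; the same pattern appears in sctp_process_unk_param() further down.

err = sctp_make_op_error_space(asoc, chunk, total_cause_len);
if (err)
	sctp_init_cause(err, SCTP_ERROR_UNKNOWN_PARAM,
			(const void *)param.p,
			WORD_ROUND(ntohs(param.p->length)));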
......@@ -1405,6 +1427,162 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
* 3rd Level Abstractions
********************************************************************/
/* Verify the INIT packet before we process it. */
int sctp_verify_init(const sctp_association_t *asoc,
sctp_cid_t cid,
sctp_init_chunk_t *peer_init,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chk_p)
{
sctpParam_t param;
uint8_t *end;
/* FIXME - Verify the fixed fields of the INIT chunk. Also, verify
* the mandatory parameters somewhere here and generate either the
* "Missing mandatory parameter" error or the "Invalid mandatory
* parameter" error. */
/* Find unrecognized parameters. */
end = ((uint8_t *)peer_init + ntohs(peer_init->chunk_hdr.length));
for (param.v = peer_init->init_hdr.params;
param.v < end;
param.v += WORD_ROUND(ntohs(param.p->length))) {
if (!sctp_verify_param(asoc, param, cid, chunk, err_chk_p))
return 0;
} /* for (loop through all parameters) */
return 1;
}
/* Find unrecognized parameters in the chunk.
* Return values:
* 0 - discard the chunk
* 1 - continue with the chunk
*/
int sctp_verify_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_cid_t cid,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chk_p)
{
int retval = 1;
/* FIXME - This routine is not looking at each parameter per the
* chunk type, i.e., unrecognized parameters should be further
* identified based on the chunk id.
*/
switch (param.p->type) {
case SCTP_PARAM_IPV4_ADDRESS:
case SCTP_PARAM_IPV6_ADDRESS:
case SCTP_PARAM_COOKIE_PRESERVATIVE:
/* FIXME - If we don't support the host name parameter, we should
* generate an error for this - Unresolvable address.
*/
case SCTP_PARAM_HOST_NAME_ADDRESS:
case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
case SCTP_PARAM_STATE_COOKIE:
case SCTP_PARAM_HEARTBEAT_INFO:
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
case SCTP_PARAM_ECN_CAPABLE:
break;
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
return sctp_process_unk_param(asoc, param, chunk, err_chk_p);
break;
}
return retval;
}
/* RFC 3.2.1 & the Implementers Guide 2.2.
*
* The Parameter Types are encoded such that the
* highest-order two bits specify the action that must be
* taken if the processing endpoint does not recognize the
* Parameter Type.
*
* 00 - Stop processing this SCTP chunk and discard it,
* do not process any further chunks within it.
*
* 01 - Stop processing this SCTP chunk and discard it,
* do not process any further chunks within it, and report
* the unrecognized parameter in an 'Unrecognized
* Parameter Type' (in either an ERROR or in the INIT ACK).
*
* 10 - Skip this parameter and continue processing.
*
* 11 - Skip this parameter and continue processing but
* report the unrecognized parameter in an
* 'Unrecognized Parameter Type' (in either an ERROR or in
* the INIT ACK).
*
* Return value:
* 0 - discard the chunk
* 1 - continue with the chunk
*/
int sctp_process_unk_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chk_p)
{
int retval = 1;
switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
case SCTP_PARAM_ACTION_DISCARD:
retval = 0;
break;
case SCTP_PARAM_ACTION_DISCARD_ERR:
retval = 0;
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
*/
if (NULL == *err_chk_p)
*err_chk_p = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
if (*err_chk_p)
sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
(const void *)param.p,
WORD_ROUND(ntohs(param.p->length)));
break;
case SCTP_PARAM_ACTION_SKIP:
break;
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
*/
if (NULL == *err_chk_p)
*err_chk_p = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
if (*err_chk_p) {
sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
(const void *)param.p,
WORD_ROUND(ntohs(param.p->length)));
} else {
/* If there is no memory for generating the ERROR
* report as specified, an ABORT will be triggered
* to the peer and the association won't be established.
*/
retval = 0;
}
break;
default:
break;
}
return retval;
}
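A concrete illustration of the action encoding described in the comment above; the parameter type value is made up for the example and is not defined anywhere in this diff.

/* A hypothetical parameter type 0xc004: its two high-order bits are 11,
 * so masking with SCTP_PARAM_ACTION_MASK yields SCTP_PARAM_ACTION_SKIP_ERR
 * and the parameter is skipped but reported in an ERROR or INIT ACK.
 */
__u16 ptype  = htons(0xc004);
__u16 action = ptype & SCTP_PARAM_ACTION_MASK;  /* == SCTP_PARAM_ACTION_SKIP_ERR */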
/* Unpack the parameters in an INIT packet.
* FIXME: There is no return status to allow callers to do
* error handling.
......@@ -1609,9 +1787,9 @@ int sctp_process_param(sctp_association_t *asoc, sctpParam_t param,
asoc->peer.cookie = param.cookie->body;
break;
case SCTP_PARAM_HEATBEAT_INFO:
case SCTP_PARAM_HEARTBEAT_INFO:
SCTP_DEBUG_PRINTK("unimplemented "
"SCTP_PARAM_HEATBEAT_INFO\n");
"SCTP_PARAM_HEARTBEAT_INFO\n");
break;
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
......@@ -1624,14 +1802,13 @@ int sctp_process_param(sctp_association_t *asoc, sctpParam_t param,
break;
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
* called prior to this routine. Simply log the error
* here.
*/
SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n",
ntohs(param.p->type), asoc);
/* FIXME: The entire parameter processing really needs
* redesigned. For now, always return success as doing
* otherwise craters the system.
*/
retval = 1;
break;
};
......
......@@ -327,7 +327,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_GEN_INIT_ACK:
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC);
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
if (!new_obj)
goto nomem;
......@@ -344,10 +345,20 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_GEN_COOKIE_ECHO:
/* Generate a COOKIE ECHO chunk. */
new_obj = sctp_make_cookie_echo(asoc, chunk);
if (!new_obj)
if (!new_obj) {
if (command->obj.ptr)
sctp_free_chunk(command->obj.ptr);
goto nomem;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
/* If there is an ERROR chunk to be sent along with
* the COOKIE_ECHO, send it, too.
*/
if (command->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(command->obj.ptr));
break;
case SCTP_CMD_GEN_SHUTDOWN:
......@@ -397,8 +408,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
/* Send a full packet to our peer. */
packet = command->obj.ptr;
sctp_packet_transmit(packet);
sctp_transport_free(packet->transport);
sctp_packet_free(packet);
sctp_ootb_pkt_free(packet);
break;
case SCTP_CMD_RETRAN:
......