Commit 8280687b authored by David S. Miller's avatar David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/network-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents eff95566 9aecd918
......@@ -587,7 +587,6 @@ static inline void dev_kfree_skb_any(struct sk_buff *skb)
dev_kfree_skb(skb);
}
extern void net_call_rx_atomic(void (*fn)(void));
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
......
......@@ -155,7 +155,7 @@ typedef struct sctp_paramhdr {
typedef enum {
/* RFC 2960 Section 3.3.5 */
SCTP_PARAM_HEATBEAT_INFO = __constant_htons(1),
SCTP_PARAM_HEARTBEAT_INFO = __constant_htons(1),
/* RFC 2960 Section 3.3.2.1 */
SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5),
SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6),
......@@ -190,6 +190,7 @@ typedef enum {
SCTP_PARAM_ACTION_SKIP_ERR = __constant_htons(0xc000),
} sctp_param_action_t;
enum { SCTP_PARAM_ACTION_MASK = __constant_htons(0xc000), };
/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
......
......@@ -103,6 +103,14 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
/* Certain internal static functions need to be exported when
* compiled into the test frame.
*/
#ifndef SCTP_STATIC
#define SCTP_STATIC static
#endif
/*
* Function declarations.
*/
......
......@@ -215,7 +215,8 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *,
int priority);
sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *,
const sctp_chunk_t *,
const int priority);
const int priority,
const int unkparam_len);
sctp_chunk_t *sctp_make_cookie_echo(const sctp_association_t *,
const sctp_chunk_t *);
sctp_chunk_t *sctp_make_cookie_ack(const sctp_association_t *,
......@@ -304,6 +305,14 @@ void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *);
sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
sctp_chunk_t *chunk,
const void *payload,
size_t paylen);
sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
const sctp_chunk_t *chunk);
void sctp_ootb_pkt_free(sctp_packet_t *packet);
sctp_cookie_param_t *
sctp_pack_cookie(const sctp_endpoint_t *, const sctp_association_t *,
......
......@@ -1044,6 +1044,20 @@ sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
const sockaddr_storage_t *);
int sctp_verify_init(const sctp_association_t *asoc,
sctp_cid_t cid,
sctp_init_chunk_t *peer_init,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
int sctp_verify_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_cid_t cid,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
int sctp_process_unk_param(const sctp_association_t *asoc,
sctpParam_t param,
sctp_chunk_t *chunk,
sctp_chunk_t **err_chunk);
void sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
const sockaddr_storage_t *peer_addr,
sctp_init_chunk_t *peer_init, int priority);
......
......@@ -21,6 +21,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/if_bridge.h>
#include <linux/brlock.h>
#include <asm/uaccess.h>
#include "br_private.h"
......@@ -55,11 +56,6 @@ static int __init br_init(void)
return 0;
}
static void __br_clear_frame_hook(void)
{
br_handle_frame_hook = NULL;
}
static void __br_clear_ioctl_hook(void)
{
br_ioctl_hook = NULL;
......@@ -69,7 +65,11 @@ static void __exit br_deinit(void)
{
unregister_netdevice_notifier(&br_device_notifier);
br_call_ioctl_atomic(__br_clear_ioctl_hook);
net_call_rx_atomic(__br_clear_frame_hook);
br_write_lock_bh(BR_NETPROTO_LOCK);
br_handle_frame_hook = NULL;
br_write_unlock_bh(BR_NETPROTO_LOCK);
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
br_fdb_get_hook = NULL;
br_fdb_put_hook = NULL;
......
......@@ -18,6 +18,7 @@
#include <linux/if_bridge.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/brlock.h>
#include <asm/uaccess.h>
#include "br_private.h"
......@@ -37,7 +38,7 @@ static int br_initial_port_cost(struct net_device *dev)
return 100;
}
/* called under bridge lock */
/* called under BR_NETPROTO_LOCK and bridge lock */
static int __br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
......@@ -86,10 +87,12 @@ static struct net_bridge **__find_br(char *name)
static void del_ifs(struct net_bridge *br)
{
write_lock_bh(&br->lock);
br_write_lock_bh(BR_NETPROTO_LOCK);
write_lock(&br->lock);
while (br->port_list != NULL)
__br_del_if(br, br->port_list->dev);
write_unlock_bh(&br->lock);
write_unlock(&br->lock);
br_write_unlock_bh(BR_NETPROTO_LOCK);
}
static struct net_bridge *new_nb(char *name)
......@@ -252,10 +255,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
{
int retval;
write_lock_bh(&br->lock);
br_write_lock_bh(BR_NETPROTO_LOCK);
write_lock(&br->lock);
retval = __br_del_if(br, dev);
br_stp_recalculate_bridge_id(br);
write_unlock_bh(&br->lock);
write_unlock(&br->lock);
br_write_unlock_bh(BR_NETPROTO_LOCK);
return retval;
}
......
......@@ -369,10 +369,19 @@ static void br_make_blocking(struct net_bridge_port *p)
static void br_make_forwarding(struct net_bridge_port *p)
{
if (p->state == BR_STATE_BLOCKING) {
if (p->br->stp_enabled) {
printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
p->br->dev.name, p->port_no, p->dev->name, "listening");
p->br->dev.name, p->port_no, p->dev->name,
"listening");
p->state = BR_STATE_LISTENING;
} else {
printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
p->br->dev.name, p->port_no, p->dev->name,
"learning");
p->state = BR_STATE_LEARNING;
}
br_timer_set(&p->forward_delay_timer, jiffies);
}
}
......
......@@ -1374,20 +1374,6 @@ static void net_tx_action(struct softirq_action *h)
}
}
/**
* net_call_rx_atomic
* @fn: function to call
*
* Make a function call that is atomic with respect to the protocol
* layers.
*/
void net_call_rx_atomic(void (*fn)(void))
{
br_write_lock_bh(BR_NETPROTO_LOCK);
fn();
br_write_unlock_bh(BR_NETPROTO_LOCK);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
#endif
......
......@@ -984,16 +984,16 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
}
static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
[4] = { doit: inet_rtm_newaddr, },
[5] = { doit: inet_rtm_deladdr, },
[6] = { dumpit: inet_dump_ifaddr, },
[8] = { doit: inet_rtm_newroute, },
[9] = { doit: inet_rtm_delroute, },
[10] = { doit: inet_rtm_getroute, dumpit: inet_dump_fib, },
[4] = { .doit = inet_rtm_newaddr, },
[5] = { .doit = inet_rtm_deladdr, },
[6] = { .dumpit = inet_dump_ifaddr, },
[8] = { .doit = inet_rtm_newroute, },
[9] = { .doit = inet_rtm_delroute, },
[10] = { .doit = inet_rtm_getroute, .dumpit = inet_dump_fib, },
#ifdef CONFIG_IP_MULTIPLE_TABLES
[16] = { doit: inet_rtm_newrule, },
[17] = { doit: inet_rtm_delrule, },
[18] = { dumpit: inet_dump_rules, },
[16] = { .doit = inet_rtm_newrule, },
[17] = { .doit = inet_rtm_delrule, },
[18] = { .dumpit = inet_dump_rules, },
#endif
};
......
......@@ -229,7 +229,7 @@ static int igmp_send_report(struct net_device *dev, u32 group, int type)
iph->version = 4;
iph->ihl = (sizeof(struct iphdr)+4)>>2;
iph->tos = 0;
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->daddr = dst;
iph->saddr = rt->rt_src;
......
......@@ -126,7 +126,7 @@ static struct net_device ipgre_fb_tunnel_dev = {
static struct ip_tunnel ipgre_fb_tunnel = {
.dev = &ipgre_fb_tunnel_dev,
.parms ={ name: "gre0" }
.parms ={ .name = "gre0" }
};
/* Tunnel hash table */
......@@ -414,7 +414,7 @@ void ipgre_err(struct sk_buff *skb, u32 info)
struct sk_buff *skb2;
struct rtable *rt;
if (p[1] != __constant_htons(ETH_P_IP))
if (p[1] != htons(ETH_P_IP))
return;
flags = p[0];
......@@ -537,10 +537,10 @@ void ipgre_err(struct sk_buff *skb, u32 info)
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
if (INET_ECN_is_ce(iph->tos)) {
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
if (INET_ECN_is_not_ce(skb->nh.iph->tos))
IP_ECN_set_ce(skb->nh.iph);
} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
} else if (skb->protocol == htons(ETH_P_IPV6)) {
if (INET_ECN_is_not_ce(ip6_get_dsfield(skb->nh.ipv6h)))
IP6_ECN_set_ce(skb->nh.ipv6h);
}
......@@ -551,9 +551,9 @@ static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
u8 inner = 0;
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
inner = old_iph->tos;
else if (skb->protocol == __constant_htons(ETH_P_IPV6))
else if (skb->protocol == htons(ETH_P_IPV6))
inner = ip6_get_dsfield((struct ipv6hdr*)old_iph);
return INET_ECN_encapsulate(tos, inner);
}
......@@ -710,13 +710,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
rt = (struct rtable*)skb->dst;
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
int addr_type;
struct neighbour *neigh = skb->dst->neighbour;
......@@ -744,7 +744,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tos = tiph->tos;
if (tos&1) {
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
tos = old_iph->tos;
tos &= ~1;
}
......@@ -767,13 +767,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
else
mtu = skb->dst ? skb->dst->pmtu : dev->mtu;
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
if (skb->dst && mtu < skb->dst->pmtu && mtu >= 68)
skb->dst->pmtu = mtu;
df |= (old_iph->frag_off&__constant_htons(IP_DF));
df |= (old_iph->frag_off&htons(IP_DF));
if ((old_iph->frag_off&__constant_htons(IP_DF)) &&
if ((old_iph->frag_off&htons(IP_DF)) &&
mtu < ntohs(old_iph->tot_len)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
......@@ -781,7 +781,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
if (rt6 && mtu < rt6->u.dst.pmtu && mtu >= IPV6_MIN_MTU) {
......@@ -847,10 +847,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->saddr = rt->rt_src;
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == __constant_htons(ETH_P_IP))
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
else if (skb->protocol == __constant_htons(ETH_P_IPV6))
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
#endif
else
......@@ -938,11 +938,11 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)) ||
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
if (!(p.i_flags&GRE_KEY))
p.i_key = 0;
......
......@@ -136,7 +136,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
......@@ -187,7 +187,7 @@ __inline__ int ip_finish_output(struct sk_buff *skb)
struct net_device *dev = skb->dst->dev;
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
ip_finish_output2);
......@@ -209,7 +209,7 @@ int ip_mc_output(struct sk_buff *skb)
#endif
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
/*
* Multicasts are looped back for other local users
......@@ -394,7 +394,7 @@ int ip_queue_xmit(struct sk_buff *skb)
*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
iph->tot_len = htons(skb->len);
if (ip_dont_fragment(sk, &rt->u.dst))
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = inet->ttl;
......@@ -463,7 +463,7 @@ static int ip_build_xmit_slow(struct sock *sk,
mtu = rt->u.dst.pmtu;
if (ip_dont_fragment(sk, &rt->u.dst))
df = __constant_htons(IP_DF);
df = htons(IP_DF);
length -= sizeof(struct iphdr);
......@@ -594,7 +594,7 @@ static int ip_build_xmit_slow(struct sock *sk,
/*
* Any further fragments will have MF set.
*/
mf = __constant_htons(IP_MF);
mf = htons(IP_MF);
}
if (rt->rt_type == RTN_MULTICAST)
iph->ttl = inet->mc_ttl;
......@@ -693,7 +693,7 @@ int ip_build_xmit(struct sock *sk,
*/
df = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
df = __constant_htons(IP_DF);
df = htons(IP_DF);
/*
* Fast path for unfragmented frames without options.
......@@ -797,7 +797,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & __constant_htons(IP_MF);
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
......@@ -882,7 +882,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
* last fragment then keep MF on each bit
*/
if (left > 0 || not_last_frag)
iph->frag_off |= __constant_htons(IP_MF);
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
......
......@@ -362,11 +362,11 @@ static int __init ic_defaults(void)
if (ic_netmask == INADDR_NONE) {
if (IN_CLASSA(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSA_NET);
ic_netmask = htonl(IN_CLASSA_NET);
else if (IN_CLASSB(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSB_NET);
ic_netmask = htonl(IN_CLASSB_NET);
else if (IN_CLASSC(ntohl(ic_myaddr)))
ic_netmask = __constant_htonl(IN_CLASSC_NET);
ic_netmask = htonl(IN_CLASSC_NET);
else {
printk(KERN_ERR "IP-Config: Unable to guess netmask for address %u.%u.%u.%u\n",
NIPQUAD(ic_myaddr));
......@@ -432,11 +432,11 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
goto drop;
/* If it's not a RARP reply, delete it. */
if (rarp->ar_op != __constant_htons(ARPOP_RREPLY))
if (rarp->ar_op != htons(ARPOP_RREPLY))
goto drop;
/* If it's not Ethernet, delete it. */
if (rarp->ar_pro != __constant_htons(ETH_P_IP))
if (rarp->ar_pro != htons(ETH_P_IP))
goto drop;
/* Extract variable-width fields */
......@@ -672,15 +672,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
h->version = 4;
h->ihl = 5;
h->tot_len = htons(sizeof(struct bootp_pkt));
h->frag_off = __constant_htons(IP_DF);
h->frag_off = htons(IP_DF);
h->ttl = 64;
h->protocol = IPPROTO_UDP;
h->daddr = INADDR_BROADCAST;
h->check = ip_fast_csum((unsigned char *) h, h->ihl);
/* Construct UDP header */
b->udph.source = __constant_htons(68);
b->udph.dest = __constant_htons(67);
b->udph.source = htons(68);
b->udph.dest = htons(67);
b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
/* UDP checksum not calculated -- explicitly allowed in BOOTP RFC */
......@@ -711,7 +711,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
/* Chain packet down the line... */
skb->dev = dev;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
if ((dev->hard_header &&
dev->hard_header(skb, dev, ntohs(skb->protocol), dev->broadcast, dev->dev_addr, skb->len) < 0) ||
dev_queue_xmit(skb) < 0)
......@@ -819,13 +819,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
ip_fast_csum((char *) h, h->ihl) != 0 ||
skb->len < ntohs(h->tot_len) ||
h->protocol != IPPROTO_UDP ||
b->udph.source != __constant_htons(67) ||
b->udph.dest != __constant_htons(68) ||
b->udph.source != htons(67) ||
b->udph.dest != htons(68) ||
ntohs(h->tot_len) < ntohs(b->udph.len) + sizeof(struct iphdr))
goto drop;
/* Fragments are not supported */
if (h->frag_off & __constant_htons(IP_OFFSET | IP_MF)) {
if (h->frag_off & htons(IP_OFFSET | IP_MF)) {
printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented reply.\n");
goto drop;
}
......
......@@ -129,7 +129,7 @@ static struct net_device ipip_fb_tunnel_dev = {
static struct ip_tunnel ipip_fb_tunnel = {
.dev = &ipip_fb_tunnel_dev,
.parms ={ name: "tunl0", }
.parms ={ .name = "tunl0", }
};
static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
......@@ -483,7 +483,7 @@ int ipip_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
read_lock(&ipip_lock);
......@@ -544,7 +544,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol != __constant_htons(ETH_P_IP))
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
if (tos&1)
......@@ -585,9 +585,9 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->dst && mtu < skb->dst->pmtu)
skb->dst->pmtu = mtu;
df |= (old_iph->frag_off&__constant_htons(IP_DF));
df |= (old_iph->frag_off&htons(IP_DF));
if ((old_iph->frag_off&__constant_htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
goto tx_error;
......@@ -703,10 +703,10 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
......
......@@ -1434,7 +1434,7 @@ int pim_rcv_v1(struct sk_buff * skb)
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
......@@ -1501,7 +1501,7 @@ int pim_rcv(struct sk_buff * skb)
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
......
......@@ -804,7 +804,7 @@ do_bindings(struct ip_conntrack *ct,
/* Always defragged for helpers */
IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
& __constant_htons(IP_MF|IP_OFFSET)));
& htons(IP_MF|IP_OFFSET)));
/* Have to grab read lock before sibling_list traversal */
READ_LOCK(&ip_conntrack_lock);
......
......@@ -1260,9 +1260,9 @@ static unsigned int nat_help(struct ip_conntrack *ct,
* on post routing (SNAT).
*/
if (!((dir == IP_CT_DIR_REPLY && hooknum == NF_IP_PRE_ROUTING &&
udph->source == __constant_ntohs(SNMP_PORT)) ||
udph->source == ntohs(SNMP_PORT)) ||
(dir == IP_CT_DIR_ORIGINAL && hooknum == NF_IP_POST_ROUTING &&
udph->dest == __constant_ntohs(SNMP_TRAP_PORT)))) {
udph->dest == ntohs(SNMP_TRAP_PORT)))) {
spin_unlock_bh(&snmp_lock);
return NF_ACCEPT;
}
......
......@@ -75,7 +75,7 @@ ip_nat_fn(unsigned int hooknum,
/* We never see fragments: conntrack defrags on pre-routing
and local-out, and ip_nat_out protects post-routing. */
IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
& __constant_htons(IP_MF|IP_OFFSET)));
& htons(IP_MF|IP_OFFSET)));
(*pskb)->nfcache |= NFC_UNKNOWN;
......@@ -186,7 +186,7 @@ ip_nat_out(unsigned int hooknum,
I'm starting to have nightmares about fragments. */
if ((*pskb)->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
*pskb = ip_ct_gather_frags(*pskb);
if (!*pskb)
......
......@@ -1244,7 +1244,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
return -EINVAL;
if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
skb->protocol != __constant_htons(ETH_P_IP))
skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (ZERONET(saddr)) {
......@@ -1455,7 +1455,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(res))))
flags |= RTCF_DOREDIRECT;
if (skb->protocol != __constant_htons(ETH_P_IP)) {
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*/
......@@ -1520,7 +1520,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
out: return err;
brd_input:
if (skb->protocol != __constant_htons(ETH_P_IP))
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
if (ZERONET(saddr))
......@@ -2154,7 +2154,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
err = -ENODEV;
if (!dev)
goto out;
skb->protocol = __constant_htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
local_bh_disable();
err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
......
......@@ -360,7 +360,7 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
break;
if (sk->family == AF_INET6 && cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == __constant_htonl(0xffff) &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr+3, cond->addr, cond->prefix_len))
break;
}
......
......@@ -2103,7 +2103,7 @@ static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr
} else if (tp->tstamp_ok &&
th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
__u32 *ptr = (__u32 *)(th + 1);
if (*ptr == __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
tp->saw_tstamp = 1;
++ptr;
......@@ -3275,7 +3275,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
__u32 *ptr = (__u32 *)(th + 1);
/* No? Slow path! */
if (*ptr != __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
goto slow_path;
......
......@@ -81,8 +81,8 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
.__tcp_lhash_lock = RW_LOCK_UNLOCKED,
.__tcp_lhash_users = ATOMIC_INIT(0),
__tcp_lhash_wait:
__WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
.__tcp_lhash_wait
= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
.__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
};
......
......@@ -428,7 +428,7 @@ static void tcp_twkill(unsigned long);
static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = { function: tcp_twkill };
static struct timer_list tcp_tw_timer = { .function = tcp_twkill };
static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
{
......@@ -495,7 +495,7 @@ void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer = {function: tcp_twcal_tick};
static struct timer_list tcp_twcal_timer = {.function = tcp_twcal_tick};
static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
......
......@@ -94,7 +94,7 @@ rwlock_t addrconf_lock = RW_LOCK_UNLOCKED;
static void addrconf_verify(unsigned long);
static struct timer_list addr_chk_timer = { function: addrconf_verify };
static struct timer_list addr_chk_timer = { .function = addrconf_verify };
static spinlock_t addrconf_verify_lock = SPIN_LOCK_UNLOCKED;
static int addrconf_ifdown(struct net_device *dev, int how);
......@@ -144,47 +144,47 @@ int ipv6_addr_type(struct in6_addr *addr)
/* Consider all addresses with the first three bits different of
000 and 111 as unicasts.
*/
if ((st & __constant_htonl(0xE0000000)) != __constant_htonl(0x00000000) &&
(st & __constant_htonl(0xE0000000)) != __constant_htonl(0xE0000000))
if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
(st & htonl(0xE0000000)) != htonl(0xE0000000))
return IPV6_ADDR_UNICAST;
if ((st & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000)) {
if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
int type = IPV6_ADDR_MULTICAST;
switch((st & __constant_htonl(0x00FF0000))) {
case __constant_htonl(0x00010000):
switch((st & htonl(0x00FF0000))) {
case htonl(0x00010000):
type |= IPV6_ADDR_LOOPBACK;
break;
case __constant_htonl(0x00020000):
case htonl(0x00020000):
type |= IPV6_ADDR_LINKLOCAL;
break;
case __constant_htonl(0x00050000):
case htonl(0x00050000):
type |= IPV6_ADDR_SITELOCAL;
break;
};
return type;
}
if ((st & __constant_htonl(0xFFC00000)) == __constant_htonl(0xFE800000))
if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST);
if ((st & __constant_htonl(0xFFC00000)) == __constant_htonl(0xFEC00000))
if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST);
if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
if (addr->s6_addr32[2] == 0) {
if (addr->in6_u.u6_addr32[3] == 0)
if (addr->s6_addr32[3] == 0)
return IPV6_ADDR_ANY;
if (addr->s6_addr32[3] == __constant_htonl(0x00000001))
if (addr->s6_addr32[3] == htonl(0x00000001))
return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST);
return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST);
}
if (addr->s6_addr32[2] == __constant_htonl(0x0000ffff))
if (addr->s6_addr32[2] == htonl(0x0000ffff))
return IPV6_ADDR_MAPPED;
}
......@@ -755,7 +755,7 @@ static void addrconf_add_mroute(struct net_device *dev)
memset(&rtmsg, 0, sizeof(rtmsg));
ipv6_addr_set(&rtmsg.rtmsg_dst,
__constant_htonl(0xFF000000), 0, 0, 0);
htonl(0xFF000000), 0, 0, 0);
rtmsg.rtmsg_dst_len = 8;
rtmsg.rtmsg_metric = IP6_RT_PRIO_ADDRCONF;
rtmsg.rtmsg_ifindex = dev->ifindex;
......@@ -785,7 +785,7 @@ static void addrconf_add_lroute(struct net_device *dev)
{
struct in6_addr addr;
ipv6_addr_set(&addr, __constant_htonl(0xFE800000), 0, 0, 0);
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
addrconf_prefix_route(&addr, 10, dev, 0, RTF_ADDRCONF);
}
......@@ -1123,7 +1123,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
if (idev->dev->flags&IFF_POINTOPOINT) {
addr.s6_addr32[0] = __constant_htonl(0xfe800000);
addr.s6_addr32[0] = htonl(0xfe800000);
scope = IFA_LINK;
} else {
scope = IPV6_ADDR_COMPATv4;
......@@ -1237,9 +1237,7 @@ static void addrconf_dev_config(struct net_device *dev)
return;
memset(&addr, 0, sizeof(struct in6_addr));
addr.s6_addr[0] = 0xFE;
addr.s6_addr[1] = 0x80;
addr.s6_addr32[0] = htonl(0xFE800000);
if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
addrconf_add_linklocal(idev, &addr);
......
......@@ -150,7 +150,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
}
} else {
ipv6_addr_set(&sin->sin6_addr, 0, 0,
__constant_htonl(0xffff),
htonl(0xffff),
*(u32*)(skb->nh.raw + serr->addr_offset));
}
}
......@@ -173,7 +173,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
struct inet_opt *inet = inet_sk(sk);
ipv6_addr_set(&sin->sin6_addr, 0, 0,
__constant_htonl(0xffff),
htonl(0xffff),
skb->nh.iph->saddr);
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
......
......@@ -198,7 +198,7 @@ static int is_ineligible(struct sk_buff *skb)
u8 type;
if (skb_copy_bits(skb, ptr+offsetof(struct icmp6hdr, icmp6_type),
&type, 1)
|| !(type & 0x80))
|| !(type & ICMPV6_INFOMSG_MASK))
return 1;
}
return 0;
......@@ -216,7 +216,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
int res = 0;
/* Informational messages are not limited. */
if (type & 0x80)
if (type & ICMPV6_INFOMSG_MASK)
return 1;
/* Do not limit pmtu discovery, it would break it. */
......@@ -519,22 +519,22 @@ static int icmpv6_rcv(struct sk_buff *skb)
skb_checksum(skb, 0, skb->len, 0))) {
if (net_ratelimit())
printk(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
ntohs(saddr->in6_u.u6_addr16[0]),
ntohs(saddr->in6_u.u6_addr16[1]),
ntohs(saddr->in6_u.u6_addr16[2]),
ntohs(saddr->in6_u.u6_addr16[3]),
ntohs(saddr->in6_u.u6_addr16[4]),
ntohs(saddr->in6_u.u6_addr16[5]),
ntohs(saddr->in6_u.u6_addr16[6]),
ntohs(saddr->in6_u.u6_addr16[7]),
ntohs(daddr->in6_u.u6_addr16[0]),
ntohs(daddr->in6_u.u6_addr16[1]),
ntohs(daddr->in6_u.u6_addr16[2]),
ntohs(daddr->in6_u.u6_addr16[3]),
ntohs(daddr->in6_u.u6_addr16[4]),
ntohs(daddr->in6_u.u6_addr16[5]),
ntohs(daddr->in6_u.u6_addr16[6]),
ntohs(daddr->in6_u.u6_addr16[7]));
ntohs(saddr->s6_addr16[0]),
ntohs(saddr->s6_addr16[1]),
ntohs(saddr->s6_addr16[2]),
ntohs(saddr->s6_addr16[3]),
ntohs(saddr->s6_addr16[4]),
ntohs(saddr->s6_addr16[5]),
ntohs(saddr->s6_addr16[6]),
ntohs(saddr->s6_addr16[7]),
ntohs(daddr->s6_addr16[0]),
ntohs(daddr->s6_addr16[1]),
ntohs(daddr->s6_addr16[2]),
ntohs(daddr->s6_addr16[3]),
ntohs(daddr->s6_addr16[4]),
ntohs(daddr->s6_addr16[5]),
ntohs(daddr->s6_addr16[6]),
ntohs(daddr->s6_addr16[7]));
goto discard_it;
}
}
......@@ -613,7 +613,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
printk(KERN_DEBUG "icmpv6: msg of unkown type\n");
/* informational */
if (type & 0x80)
if (type & ICMPV6_INFOMSG_MASK)
break;
/*
......
......@@ -93,7 +93,7 @@ static struct fib6_node * fib6_repair_tree(struct fib6_node *fn);
static __u32 rt_sernum = 0;
static struct timer_list ip6_fib_timer = { function: fib6_run_gc };
static struct timer_list ip6_fib_timer = { .function = fib6_run_gc };
static struct fib6_walker_t fib6_walker_list = {
&fib6_walker_list, &fib6_walker_list,
......
......@@ -102,7 +102,7 @@ int ip6_output(struct sk_buff *skb)
struct dst_entry *dst = skb->dst;
struct net_device *dev = dst->dev;
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
......@@ -223,7 +223,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
* Fill in the IPv6 header
*/
*(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
*(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel;
hlimit = -1;
if (np)
hlimit = np->hop_limit;
......@@ -264,7 +264,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
struct ipv6hdr *hdr;
int totlen;
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
totlen = len + sizeof(struct ipv6hdr);
......
......@@ -408,14 +408,8 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
*/
if (e->info->hook == NF_IP_LOCAL_OUT) {
struct ipv6hdr *iph = e->skb->nh.ipv6h;
if (!( iph->daddr.in6_u.u6_addr32[0] == e->rt_info.daddr.in6_u.u6_addr32[0]
&& iph->daddr.in6_u.u6_addr32[1] == e->rt_info.daddr.in6_u.u6_addr32[1]
&& iph->daddr.in6_u.u6_addr32[2] == e->rt_info.daddr.in6_u.u6_addr32[2]
&& iph->daddr.in6_u.u6_addr32[3] == e->rt_info.daddr.in6_u.u6_addr32[3]
&& iph->saddr.in6_u.u6_addr32[0] == e->rt_info.saddr.in6_u.u6_addr32[0]
&& iph->saddr.in6_u.u6_addr32[1] == e->rt_info.saddr.in6_u.u6_addr32[1]
&& iph->saddr.in6_u.u6_addr32[2] == e->rt_info.saddr.in6_u.u6_addr32[2]
&& iph->saddr.in6_u.u6_addr32[3] == e->rt_info.saddr.in6_u.u6_addr32[3]))
if (ipv6_addr_cmp(&iph->daddr, &e->rt_info.daddr) ||
ipv6_addr_cmp(&iph->saddr, &e->rt_info.saddr))
return route6_me_harder(e->skb);
}
return 0;
......
......@@ -112,7 +112,7 @@ static void dump_packet(const struct ip6t_log_info *info,
printk("FRAG:%u ", ntohs(fhdr->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fhdr->frag_off & __constant_htons(0x0001))
if (fhdr->frag_off & htons(0x0001))
printk("INCOMPLETE ");
printk("ID:%08x ", fhdr->identification);
......
......@@ -372,7 +372,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));
/* Is this the final fragment? */
if (!(fhdr->frag_off & __constant_htons(0x0001))) {
if (!(fhdr->frag_off & htons(0x0001))) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
......@@ -648,7 +648,7 @@ int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
hdr = skb->nh.ipv6h;
fhdr = (struct frag_hdr *)skb->h.raw;
if (!(fhdr->frag_off & __constant_htons(0xFFF9))) {
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->h.raw += sizeof(struct frag_hdr);
IP6_INC_STATS_BH(Ip6ReasmOKs);
......
......@@ -396,7 +396,7 @@ static int ipip6_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = __constant_htons(ETH_P_IPV6);
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
tunnel->stat.rx_packets++;
tunnel->stat.rx_bytes += skb->len;
......@@ -470,7 +470,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
if (skb->protocol != __constant_htons(ETH_P_IPV6))
if (skb->protocol != htons(ETH_P_IPV6))
goto tx_error;
if (!dst)
......@@ -588,7 +588,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
if (mtu > IPV6_MIN_MTU)
iph->frag_off = __constant_htons(IP_DF);
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
......@@ -659,10 +659,10 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
goto done;
if (p.iph.ttl)
p.iph.frag_off |= __constant_htons(IP_DF);
p.iph.frag_off |= htons(IP_DF);
t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
......
......@@ -424,11 +424,11 @@ static int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
if (skb->protocol == __constant_htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP)) {
struct inet_opt *inet = inet_sk(sk);
ipv6_addr_set(&sin6->sin6_addr, 0, 0,
__constant_htonl(0xffff), skb->nh.iph->saddr);
htonl(0xffff), skb->nh.iph->saddr);
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
......
......@@ -587,7 +587,6 @@ EXPORT_SYMBOL(ip_route_me_harder);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(net_call_rx_atomic);
EXPORT_SYMBOL(softnet_data);
#if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
......
......@@ -97,7 +97,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
/* These are printable form of variable-length parameters. */
const char *sctp_param_tbl[SCTP_PARAM_ECN_CAPABLE + 1] = {
"",
"PARAM_HEATBEAT_INFO",
"PARAM_HEARTBEAT_INFO",
"",
"",
"",
......
......@@ -104,7 +104,7 @@ void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
void sctp_outqueue_teardown(sctp_outqueue_t *q)
{
sctp_transport_t *transport;
struct list_head *lchunk, *pos;
struct list_head *lchunk, *pos, *temp;
sctp_chunk_t *chunk;
/* Throw away unacknowledged chunks. */
......@@ -117,6 +117,13 @@ void sctp_outqueue_teardown(sctp_outqueue_t *q)
}
}
/* Throw away chunks that have been gap ACKed. */
list_for_each_safe(lchunk, temp, &q->sacked) {
list_del(lchunk);
chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
sctp_free_chunk(chunk);
}
/* Throw away any leftover chunks. */
while ((chunk = (sctp_chunk_t *) skb_dequeue(&q->out)))
sctp_free_chunk(chunk);
......
......@@ -223,7 +223,7 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
int priority)
int priority, int unkparam_len)
{
sctp_inithdr_t initack;
sctp_chunk_t *retval;
......@@ -278,7 +278,10 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
if (!cookie)
goto nomem_cookie;
chunksize = sizeof(initack) + addrs_len + cookie_len;
/* Calculate the total size of allocation, include the reserved
* space for reporting unknown parameters if it is specified.
*/
chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
/* Tell peer that we'll do ECN only if peer advertised such cap. */
if (asoc->peer.ecn_capable)
......@@ -883,25 +886,27 @@ sctp_chunk_t *sctp_make_heartbeat_ack(const sctp_association_t *asoc,
return retval;
}
/* Create an Operation Error chunk. */
sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
/* Create an Operation Error chunk with the specified space reserved.
* This routine can be used for containing multiple causes in the chunk.
*/
sctp_chunk_t *sctp_make_op_error_space(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
__u16 cause_code, const void *payload,
size_t paylen)
size_t size)
{
sctp_chunk_t *retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
sizeof(sctp_errhdr_t) + paylen);
sctp_chunk_t *retval;
retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
sizeof(sctp_errhdr_t) + size);
if (!retval)
goto nodata;
sctp_init_cause(retval, cause_code, payload, paylen);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
* An endpoint SHOULD transmit reply chunks (e.g., SACK,
* HEARTBEAT ACK, * etc.) to the same destination transport
* address from which it * received the DATA or control chunk
* HEARTBEAT ACK, etc.) to the same destination transport
* address from which it received the DATA or control chunk
* to which it is replying.
*
*/
if (chunk)
retval->transport = chunk->transport;
......@@ -910,6 +915,23 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
return retval;
}
/* Create an Operation Error chunk. */
sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
__u16 cause_code, const void *payload,
size_t paylen)
{
sctp_chunk_t *retval = sctp_make_op_error_space(asoc, chunk, paylen);
if (!retval)
goto nodata;
sctp_init_cause(retval, cause_code, payload, paylen);
nodata:
return retval;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
......@@ -1405,6 +1427,162 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
* 3rd Level Abstractions
********************************************************************/
/* Validate an inbound INIT/INIT ACK chunk before it is processed.
 *
 * Walks the variable-length parameter list and defers to
 * sctp_verify_param() for each entry; *err_chk_p may be filled in with
 * an ERROR chunk reporting unrecognized parameters.
 *
 * Returns 0 if the chunk must be discarded, 1 to continue processing.
 */
int sctp_verify_init(const sctp_association_t *asoc,
		     sctp_cid_t cid,
		     sctp_init_chunk_t *peer_init,
		     sctp_chunk_t *chunk,
		     sctp_chunk_t **err_chk_p)
{
	sctpParam_t cur;
	uint8_t *limit;

	/* FIXME - Verify the fixed fields of the INIT chunk.  Also, verify
	 * the mandatory parameters somewhere here and generate either the
	 * "Missing mandatory parameter" error or the "Invalid mandatory
	 * parameter" error.
	 */

	/* Scan for parameters we do not recognize. */
	limit = (uint8_t *)peer_init + ntohs(peer_init->chunk_hdr.length);
	cur.v = peer_init->init_hdr.params;
	while (cur.v < limit) {
		if (!sctp_verify_param(asoc, cur, cid, chunk, err_chk_p))
			return 0;
		/* Parameters are padded out to 4-byte boundaries. */
		cur.v += WORD_ROUND(ntohs(cur.p->length));
	}

	return 1;
}
/* Check one INIT parameter against the set of parameter types this
 * implementation understands.  Anything else is handed to
 * sctp_process_unk_param(), which applies the RFC 2960 3.2.1 action
 * bits (and may queue an error report in *err_chk_p).
 *
 * Return values:
 * 	0 - discard the chunk
 * 	1 - continue with the chunk
 */
int sctp_verify_param(const sctp_association_t *asoc,
		      sctpParam_t param,
		      sctp_cid_t cid,
		      sctp_chunk_t *chunk,
		      sctp_chunk_t **err_chk_p)
{
	int retval = 1;

	/* FIXME - This routine is not looking at each parameter per the
	 * chunk type, i.e., unrecognized parameters should be further
	 * identified based on the chunk id.
	 */

	switch (param.p->type) {
	case SCTP_PARAM_IPV4_ADDRESS:
	case SCTP_PARAM_IPV6_ADDRESS:
	case SCTP_PARAM_COOKIE_PRESERVATIVE:
	/* FIXME - If we don't support the host name parameter, we should
	 * generate an error for this - Unresolvable address.
	 */
	case SCTP_PARAM_HOST_NAME_ADDRESS:
	case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
	case SCTP_PARAM_STATE_COOKIE:
	case SCTP_PARAM_HEARTBEAT_INFO:
	case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
	case SCTP_PARAM_ECN_CAPABLE:
		/* Known parameter type: nothing to report. */
		break;
	default:
		SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
				  ntohs(param.p->type), cid);
		/* The original code had an unreachable `break;` after this
		 * return; fold the result into retval so the function has a
		 * single exit and no dead code.
		 */
		retval = sctp_process_unk_param(asoc, param, chunk,
						err_chk_p);
		break;
	}

	return retval;
}
/* RFC 2960 Section 3.2.1 and the Implementers Guide Section 2.2.
 *
 * The Parameter Types are encoded such that the highest-order two bits
 * specify the action that must be taken if the processing endpoint does
 * not recognize the Parameter Type:
 *
 * 00 - Stop processing this SCTP chunk and discard it,
 *	do not process any further chunks within it.
 *
 * 01 - Stop processing this SCTP chunk and discard it,
 *	do not process any further chunks within it, and report
 *	the unrecognized parameter in an 'Unrecognized
 *	Parameter Type' (in either an ERROR or in the INIT ACK).
 *
 * 10 - Skip this parameter and continue processing.
 *
 * 11 - Skip this parameter and continue processing but
 *	report the unrecognized parameter in an
 *	'Unrecognized Parameter Type' (in either an ERROR or in
 *	the INIT ACK).
 *
 * Return value:
 * 	0 - discard the chunk
 * 	1 - continue with the chunk
 */
int sctp_process_unk_param(const sctp_association_t *asoc,
			   sctpParam_t param,
			   sctp_chunk_t *chunk,
			   sctp_chunk_t **err_chk_p)
{
	int ret = 1;

	switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
	case SCTP_PARAM_ACTION_DISCARD:
		ret = 0;
		break;
	case SCTP_PARAM_ACTION_DISCARD_ERR:
		ret = 0;
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
		if (NULL == *err_chk_p)
			*err_chk_p = sctp_make_op_error_space(asoc, chunk,
					ntohs(chunk->chunk_hdr->length));
		if (*err_chk_p)
			sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
					(const void *)param.p,
					WORD_ROUND(ntohs(param.p->length)));
		break;
	case SCTP_PARAM_ACTION_SKIP:
		break;
	case SCTP_PARAM_ACTION_SKIP_ERR:
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
		if (NULL == *err_chk_p)
			*err_chk_p = sctp_make_op_error_space(asoc, chunk,
					ntohs(chunk->chunk_hdr->length));
		if (*err_chk_p) {
			sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
					(const void *)param.p,
					WORD_ROUND(ntohs(param.p->length)));
		} else {
			/* If there is no memory for generating the ERROR
			 * report as specified, an ABORT will be triggered
			 * to the peer and the association won't be
			 * established.
			 */
			ret = 0;
		}
		break;
	default:
		break;
	}

	return ret;
}
/* Unpack the parameters in an INIT packet.
* FIXME: There is no return status to allow callers to do
* error handling.
......@@ -1609,9 +1787,9 @@ int sctp_process_param(sctp_association_t *asoc, sctpParam_t param,
asoc->peer.cookie = param.cookie->body;
break;
case SCTP_PARAM_HEATBEAT_INFO:
case SCTP_PARAM_HEARTBEAT_INFO:
SCTP_DEBUG_PRINTK("unimplemented "
"SCTP_PARAM_HEATBEAT_INFO\n");
"SCTP_PARAM_HEARTBEAT_INFO\n");
break;
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
......@@ -1624,14 +1802,13 @@ int sctp_process_param(sctp_association_t *asoc, sctpParam_t param,
break;
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
* called prior to this routine. Simply log the error
* here.
*/
SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n",
ntohs(param.p->type), asoc);
/* FIXME: The entire parameter processing really needs
* redesigned. For now, always return success as doing
* otherwise craters the system.
*/
retval = 1;
break;
};
......
......@@ -327,7 +327,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_GEN_INIT_ACK:
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC);
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
if (!new_obj)
goto nomem;
......@@ -344,10 +345,20 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_GEN_COOKIE_ECHO:
/* Generate a COOKIE ECHO chunk. */
new_obj = sctp_make_cookie_echo(asoc, chunk);
if (!new_obj)
if (!new_obj) {
if (command->obj.ptr)
sctp_free_chunk(command->obj.ptr);
goto nomem;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
/* If there is an ERROR chunk to be sent along with
* the COOKIE_ECHO, send it, too.
*/
if (command->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(command->obj.ptr));
break;
case SCTP_CMD_GEN_SHUTDOWN:
......@@ -397,8 +408,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
/* Send a full packet to our peer. */
packet = command->obj.ptr;
sctp_packet_transmit(packet);
sctp_transport_free(packet->transport);
sctp_packet_free(packet);
sctp_ootb_pkt_free(packet);
break;
case SCTP_CMD_RETRAN:
......
......@@ -194,6 +194,10 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
sctp_chunk_t *chunk = arg;
sctp_chunk_t *repl;
sctp_association_t *new_asoc;
sctp_chunk_t *err_chunk;
sctp_packet_t *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT.
......@@ -208,6 +212,36 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
if (!chunk->singleton)
return SCTP_DISPOSITION_VIOLATION;
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_free_chunk(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
}
} else {
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
}
}
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
......@@ -230,10 +264,41 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC);
/* If there are errors need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk)
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_ack;
/* If there are errors need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
sctp_free_chunk(err_chunk);
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
......@@ -248,6 +313,9 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
nomem_ack:
sctp_association_free(new_asoc);
if (err_chunk)
sctp_free_chunk(err_chunk);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
......@@ -289,6 +357,10 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
sctp_chunk_t *chunk = arg;
sctp_init_chunk_t *initchunk;
__u32 init_tag;
sctp_chunk_t *err_chunk;
sctp_packet_t *packet;
sctp_disposition_t ret;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
......@@ -319,6 +391,49 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_free_chunk(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
SCTP_NULL());
return SCTP_DISPOSITION_NOMEM;
}
} else {
ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
SCTP_NULL());
return ret;
}
}
/* Tag the variable length paramters. Note that we never
* convert the parameters in an INIT chunk.
*/
......@@ -345,7 +460,12 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
/* 5.1 C) "A" shall then send the State Cookie received in the
* INIT ACK chunk in a COOKIE ECHO chunk, ...
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO, SCTP_NULL());
/* If there is any errors to report, send the ERROR chunk generated
* for unknown parameters as well.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
SCTP_CHUNK(err_chunk));
return SCTP_DISPOSITION_CONSUME;
nomem:
......@@ -579,7 +699,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
* HEARTBEAT is sent (see Section 8.3).
*/
hbinfo.param_hdr.type = SCTP_PARAM_HEATBEAT_INFO;
hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
......@@ -852,6 +972,11 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
sctp_chunk_t *chunk = arg;
sctp_chunk_t *repl;
sctp_association_t *new_asoc;
sctp_chunk_t *err_chunk;
sctp_packet_t *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
......@@ -866,6 +991,36 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_free_chunk(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
}
} else {
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
}
}
/*
* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
......@@ -887,10 +1042,41 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
sctp_tietags_populate(new_asoc, asoc);
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC);
/* If there are errors need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk) {
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
}
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
/* If there are errors need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
sctp_free_chunk(err_chunk);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
......@@ -903,6 +1089,9 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
return SCTP_DISPOSITION_CONSUME;
nomem:
if (err_chunk)
sctp_free_chunk(err_chunk);
return SCTP_DISPOSITION_NOMEM;
}
......@@ -1948,17 +2137,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
/* This is a new TSN. */
/* If we don't have any room in our receive window, discard.
* Actually, allow a little bit of overflow (up to a MTU of
* of overflow).
/* Discard if there is no room in the receive window.
* Actually, allow a little bit of overflow (up to a MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
if (!asoc->rwnd || (datalen > asoc->frag_point)) {
if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
SCTP_DEBUG_PRINTK("Discarding tsn: %u datalen: %Zd, "
"rwnd: %d\n", tsn, datalen, asoc->rwnd);
goto discard_noforce;
goto discard_force;
}
/*
......@@ -2330,59 +2518,32 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_packet_t *packet = NULL;
sctp_transport_t *transport = NULL;
sctp_chunk_t *chunk = arg;
sctp_chunk_t *abort;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Grub in chunk and endpoint for kewl bitz. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
/* -- Make sure the ABORT packet's V-tag is the same as the
* inbound packet if no association exists, otherwise use
* the peer's vtag.
*/
if (asoc)
vtag = asoc->peer.i.init_tag;
else
vtag = ntohl(chunk->sctp_hdr->vtag);
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Make a packet for the ABORT to go into. */
packet = t_new(sctp_packet_t, GFP_ATOMIC);
if (!packet)
goto nomem_packet;
packet = sctp_packet_init(packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0, NULL);
packet = sctp_ootb_pkt_new(asoc, chunk);
/* Make an ABORT.
* This will set the T bit since we have no association.
if (packet) {
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
abort = sctp_make_abort(NULL, chunk, 0);
if (!abort)
goto nomem_chunk;
abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Set the skb to the belonging sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
return SCTP_DISPOSITION_DISCARD;
nomem_chunk:
sctp_packet_free(packet);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
nomem_packet:
sctp_transport_free(transport);
return SCTP_DISPOSITION_CONSUME;
}
nomem:
return SCTP_DISPOSITION_NOMEM;
}
......@@ -2560,60 +2721,35 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_packet_t *packet = NULL;
sctp_transport_t *transport = NULL;
sctp_chunk_t *chunk = arg;
sctp_chunk_t *shut;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Grub in chunk and endpoint for kewl bitz. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
packet = sctp_ootb_pkt_new(asoc, chunk);
/* Make sure the ABORT packet's V-tag is the same as the
* inbound packet if no association exists, otherwise use
* the peer's vtag.
if (packet) {
/* Make an SHUTDOWN_COMPLETE.
* The T bit will be set if the asoc is NULL.
*/
vtag = ntohl(chunk->sctp_hdr->vtag);
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Make a packet for the ABORT to go into. */
packet = t_new(sctp_packet_t, GFP_ATOMIC);
if (!packet)
goto nomem_packet;
packet = sctp_packet_init(packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0, NULL);
/* Make an ABORT.
* This will set the T bit since we have no association.
*/
shut = sctp_make_shutdown_complete(NULL, chunk);
if (!shut)
goto nomem_chunk;
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Set the skb to the belonging sock for accounting. */
shut->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, shut);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
return SCTP_DISPOSITION_CONSUME;
nomem_chunk:
sctp_packet_free(packet);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
nomem_packet:
sctp_transport_free(transport);
return SCTP_DISPOSITION_CONSUME;
}
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process an unknown chunk.
*
......@@ -3949,3 +4085,93 @@ sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
skb_pull(chunk->skb, (num_blocks + num_dup_tsns) * sizeof(__u32));
return sack;
}
/* Create an ABORT packet to be sent as a response, with the specified
 * error causes appended to the ABORT chunk.
 *
 * Returns NULL if either the OOTB packet or the ABORT chunk cannot be
 * allocated; on failure any partially built packet is released.
 */
sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
				  const sctp_association_t *asoc,
				  sctp_chunk_t *chunk,
				  const void *payload,
				  size_t paylen)
{
	sctp_packet_t *pkt;
	sctp_chunk_t *abort;

	pkt = sctp_ootb_pkt_new(asoc, chunk);
	if (!pkt)
		return NULL;

	/* Make an ABORT.  The T bit will be set if the asoc is NULL. */
	abort = sctp_make_abort(asoc, chunk, paylen);
	if (!abort) {
		sctp_ootb_pkt_free(pkt);
		return NULL;
	}

	/* Add the specified error causes, i.e. payload, to the end of
	 * the chunk.
	 */
	sctp_addto_chunk(abort, paylen, payload);

	/* Set the skb to the belonging sock for accounting. */
	abort->skb->sk = ep->base.sk;

	sctp_packet_append_chunk(pkt, abort);

	return pkt;
}
/* Allocate a packet for responding in the OOTB (out-of-the-blue)
 * conditions.  Builds a throwaway transport plus packet addressed back
 * at the sender of @chunk; free with sctp_ootb_pkt_free().
 * Returns NULL on allocation failure.
 */
sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
				 const sctp_chunk_t *chunk)
{
	sctp_packet_t *packet;
	sctp_transport_t *transport;
	__u16 sport, dport;
	__u32 vtag;

	/* The reply travels the reverse path: our source port is the
	 * inbound destination port and vice versa.
	 */
	sport = ntohs(chunk->sctp_hdr->dest);
	dport = ntohs(chunk->sctp_hdr->source);

	/* Use the peer's vtag when an association exists; otherwise
	 * reflect the V-tag of the inbound packet.
	 */
	vtag = asoc ? asoc->peer.i.init_tag : ntohl(chunk->sctp_hdr->vtag);

	/* Make a transport for the bucket, Eliza... */
	transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
	if (!transport)
		goto nomem;

	/* Allocate a new packet for sending the response. */
	packet = t_new(sctp_packet_t, GFP_ATOMIC);
	if (!packet)
		goto nomem_packet;

	packet = sctp_packet_init(packet, transport, sport, dport);
	packet = sctp_packet_config(packet, vtag, 0, NULL);

	return packet;

nomem_packet:
	sctp_transport_free(transport);
nomem:
	return NULL;
}
/* Free the packet allocated earlier for responding in the OOTB condition. */
void sctp_ootb_pkt_free(sctp_packet_t *packet)
{
/* Release the throwaway transport created in sctp_ootb_pkt_new()
 * and then the packet itself.
 */
sctp_transport_free(packet->transport);
sctp_packet_free(packet);
}
......@@ -73,8 +73,12 @@
#include <net/sock.h>
#include <net/sctp/sctp.h>
/* WARNING: Please do not remove the SCTP_STATIC attribute to
* any of the functions below as they are used to export functions
* used by a project regression testsuite.
*/
/* Forward declarations for internal helper functions. */
static void __sctp_write_space(sctp_association_t *asoc);
static int sctp_writeable(struct sock *sk);
static inline int sctp_wspace(sctp_association_t *asoc);
static inline void sctp_set_owner_w(sctp_chunk_t *chunk);
......@@ -92,8 +96,7 @@ static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int);
static int sctp_do_bind(struct sock *, sockaddr_storage_t *, int);
static int sctp_autobind(struct sock *sk);
static sctp_bind_bucket_t *sctp_bucket_create(sctp_bind_hashbucket_t *head,
unsigned short snum);
/* API 3.1.2 bind() - UDP Style Syntax
* The syntax of bind() is,
......@@ -132,7 +135,8 @@ int sctp_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static long sctp_get_port_local(struct sock *, unsigned short);
/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, sockaddr_storage_t *newaddr, int addr_len)
SCTP_STATIC int sctp_do_bind(struct sock *sk, sockaddr_storage_t *newaddr,
int addr_len)
{
sctp_opt_t *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
......@@ -603,7 +607,7 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
*
* Returns 0 if ok, <0 errno code on error.
*/
static int sctp_setsockopt_bindx(struct sock* sk,
SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
struct sockaddr_storage *addrs,
int addrssize, int op)
{
......@@ -659,7 +663,7 @@ static int sctp_setsockopt_bindx(struct sock* sk,
* If sd in the close() call is a branched-off socket representing only
* one association, the shutdown is performed on that association only.
*/
static void sctp_close(struct sock *sk, long timeout)
SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
{
sctp_endpoint_t *ep;
sctp_association_t *asoc;
......@@ -727,9 +731,9 @@ static void sctp_close(struct sock *sk, long timeout)
/* BUG: We do not implement timeouts. */
/* BUG: We do not implement the equivalent of wait_for_tcp_memory(). */
static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
SCTP_STATIC int sctp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
{
sctp_opt_t *sp;
sctp_endpoint_t *ep;
......@@ -1094,7 +1098,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
*/
static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
SCTP_STATIC int sctp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
int noblock, int flags, int *addr_len)
{
sctp_ulpevent_t *event = NULL;
......@@ -1229,7 +1233,7 @@ static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
* optval - the buffer to store the value of the option.
* optlen - the size of the buffer.
*/
static int sctp_setsockopt(struct sock *sk, int level, int optname,
SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
int retval = 0;
......@@ -1313,19 +1317,20 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
}
/* FIXME: Write comments. */
static int sctp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Connect an SCTP socket to a remote peer (BSD connect() entry point).
 *
 * NOTE(review): connection establishment is not implemented yet in this
 * version of the SCTP stack; every caller gets -EOPNOTSUPP.  The uaddr /
 * addr_len arguments are ignored until the stub is filled in.
 */
SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
return -EOPNOTSUPP; /* STUB */
}
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
/* Disconnect an SCTP socket (proto_ops disconnect entry point).
 *
 * NOTE(review): not implemented yet; unconditionally returns -EOPNOTSUPP
 * and ignores 'flags'.
 */
SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
{
return -EOPNOTSUPP; /* STUB */
}
/* FIXME: Write comments. */
static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
{
int error = -EOPNOTSUPP;
......@@ -1334,7 +1339,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
}
/* FIXME: Write Comments. */
static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
/* Handle ioctl() requests on an SCTP socket.
 *
 * NOTE(review): no ioctls are supported yet; 'cmd' and 'arg' are ignored
 * and the call always fails with -EOPNOTSUPP.
 */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
return -EOPNOTSUPP; /* STUB */
}
......@@ -1343,7 +1348,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
* initialized the SCTP-specific portion of the sock.
* The sock structure should already be zero-filled memory.
*/
static int sctp_init_sock(struct sock *sk)
SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
sctp_endpoint_t *ep;
sctp_protocol_t *proto;
......@@ -1428,7 +1433,7 @@ static int sctp_init_sock(struct sock *sk)
}
/* Cleanup any SCTP per socket resources. */
static int sctp_destroy_sock(struct sock *sk)
SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
{
sctp_endpoint_t *ep;
......@@ -1442,7 +1447,7 @@ static int sctp_destroy_sock(struct sock *sk)
}
/* FIXME: Comments needed. */
static void sctp_shutdown(struct sock *sk, int how)
SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
{
/* UDP-style sockets do not support shutdown. */
/* STUB */
......@@ -1563,7 +1568,7 @@ static inline int sctp_getsockopt_autoclose(struct sock *sk, int len, char *optv
}
/* Helper routine to branch off an association to a new socket. */
static int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newsock)
SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newsock)
{
struct sock *oldsk = assoc->base.sk;
struct sock *newsk;
......@@ -1652,7 +1657,7 @@ static inline int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval
return 0;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen)
{
int retval = 0;
......@@ -1734,6 +1739,8 @@ static void sctp_unhash(struct sock *sk)
* link to the socket (struct sock) that uses it, the port number and
* a fastreuse flag (FIXME: NPI ipg).
*/
static sctp_bind_bucket_t *sctp_bucket_create(sctp_bind_hashbucket_t *head,
unsigned short snum);
static long sctp_get_port_local(struct sock *sk, unsigned short snum)
{
sctp_bind_hashbucket_t *head; /* hash list */
......@@ -1927,7 +1934,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
* An application uses listen() to mark a socket as being able to
* accept new associations.
*/
static int sctp_seqpacket_listen(struct sock *sk, int backlog)
SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
{
sctp_opt_t *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
......@@ -2184,7 +2191,8 @@ static int sctp_autobind(struct sock *sk)
* msg_control
* points here
*/
static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
sctp_cmsgs_t *cmsgs)
{
struct cmsghdr *cmsg;
......@@ -2500,6 +2508,31 @@ static inline void sctp_set_owner_w(sctp_chunk_t *chunk)
sk->wmem_queued += SCTP_DATA_SNDSIZE(chunk);
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
/* Wake up writers waiting for send-buffer space on one association.
 *
 * Called when sndbuf accounting changes (e.g. after sctp_wfree() releases
 * transmitted data).  Two wakeups are performed:
 *   1. the per-association wait queue (asoc->wait), used by
 *      sctp_wait_for_sndbuf();
 *   2. the socket's own sleepers plus async (SIGIO/poll) notification,
 *      but only if the socket as a whole is writeable again.
 *
 * Runs only when the association actually has space (sctp_wspace() > 0)
 * and the sock is still attached to a struct socket.
 */
static void __sctp_write_space(sctp_association_t *asoc)
{
struct sock *sk = asoc->base.sk;
struct socket *sock = sk->socket;
if ((sctp_wspace(asoc) > 0) && sock) {
/* Wake sctp_wait_for_sndbuf() sleepers on this association. */
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
/* Socket-wide wakeup for poll()/select() waiters. */
if (sk->sleep && waitqueue_active(sk->sleep))
wake_up_interruptible(sk->sleep);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
/* Skip SIGIO if the send side has been shut down. */
if (sock->fasync_list &&
!(sk->shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
}
/* Do accounting for the sndbuf space.
* Decrement the used sndbuf space of the corresponding association by the
* data size which was just transmitted(freed).
......@@ -2522,7 +2555,8 @@ static void sctp_wfree(struct sk_buff *skb)
}
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p, int msg_len)
static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p,
int msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
......@@ -2581,31 +2615,6 @@ static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p, int msg
goto out;
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
/* Wake up writers waiting for send-buffer space on one association.
 * (This is the pre-move copy of the function being relocated earlier in
 * the file by this commit; the logic is identical.)
 *
 * Wakes the per-association wait queue, and — when the socket as a whole
 * is writeable again — the socket's sleepers plus async (SIGIO/poll)
 * notification.  Only runs when sctp_wspace() reports free space and the
 * sock is still attached to a struct socket.
 */
static void __sctp_write_space(sctp_association_t *asoc)
{
struct sock *sk = asoc->base.sk;
struct socket *sock = sk->socket;
if ((sctp_wspace(asoc) > 0) && sock) {
/* Wake sctp_wait_for_sndbuf() sleepers on this association. */
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
/* Socket-wide wakeup for poll()/select() waiters. */
if (sk->sleep && waitqueue_active(sk->sleep))
wake_up_interruptible(sk->sleep);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
/* Skip SIGIO if the send side has been shut down. */
if (sock->fasync_list &&
!(sk->shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
}
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment