Commit b4903b22 authored by David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/sctp-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents 56c32f41 cd722282
......@@ -83,6 +83,7 @@
#include <asm/uaccess.h>
#include <asm/page.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/sctp/structs.h>
#include <net/sctp/constants.h>
#include <net/sctp/sm.h>
......@@ -201,6 +202,11 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
#define SCTP_SOCK_SLEEP_PRE(sk) SOCK_SLEEP_PRE(sk)
#define SCTP_SOCK_SLEEP_POST(sk) SOCK_SLEEP_POST(sk)
/* SCTP SNMP MIB stats handlers */
extern struct sctp_mib sctp_statistics[NR_CPUS * 2];
#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
/* Determine if this is a valid kernel address. */
static inline int sctp_is_valid_kaddr(unsigned long addr)
......
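For readers unfamiliar with the 2.5-era SNMP counter layout: sizing sctp_statistics as NR_CPUS * 2 gives every CPU two slots, one bumped from softirq/BH context and one from process context, so the SCTP_INC_STATS wrappers never need atomic operations. A minimal user-space model of that idea follows; it is illustrative only, not the kernel's SNMP_INC_STATS() macro, and the helper names are made up.

/* User-space model of a per-CPU, per-context MIB counter array; the real
 * SNMP_INC_STATS() macros index by processor id and softirq state.
 */
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct sctp_mib_model {
	unsigned long SctpOutSCTPPacks;
	unsigned long SctpInSCTPPacks;
};

/* Slot 2*cpu is used from BH context, slot 2*cpu + 1 from process context. */
static struct sctp_mib_model stats[NR_CPUS * 2];

#define MIB_INC(cpu, in_bh, field) (stats[2 * (cpu) + !(in_bh)].field++)

/* A reader folds every slot together to get the global value. */
static unsigned long mib_fold(size_t offset)
{
	unsigned long sum = 0;

	for (int i = 0; i < NR_CPUS * 2; i++)
		sum += *(unsigned long *)((char *)&stats[i] + offset);
	return sum;
}

int main(void)
{
	MIB_INC(0, 1, SctpOutSCTPPacks);	/* e.g. from packet transmit (BH) */
	MIB_INC(2, 0, SctpOutSCTPPacks);	/* e.g. from a sendmsg() caller */
	printf("SctpOutSCTPPacks = %lu\n",
	       mib_fold(offsetof(struct sctp_mib_model, SctpOutSCTPPacks)));
	return 0;
}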
......@@ -44,6 +44,7 @@
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -253,6 +254,9 @@ sctp_chunk_t *sctp_make_abort(const sctp_association_t *,
sctp_chunk_t *sctp_make_abort_no_data(const sctp_association_t *,
const sctp_chunk_t *,
__u32 tsn);
sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *,
const sctp_chunk_t *,
const struct msghdr *);
sctp_chunk_t *sctp_make_heartbeat(const sctp_association_t *,
const sctp_transport_t *,
const void *payload,
......
......@@ -246,7 +246,10 @@ typedef struct sctp_func {
int optname,
char *optval,
int *optlen);
int (*get_dst_mtu) (const sockaddr_storage_t *address);
struct dst_entry *(*get_dst) (sockaddr_storage_t *daddr,
sockaddr_storage_t *saddr);
int (*cmp_saddr) (struct dst_entry *dst,
sockaddr_storage_t *saddr);
__u16 net_header_len;
int sockaddr_len;
sa_family_t sa_family;
......@@ -476,6 +479,8 @@ struct SCTP_chunk {
/* What is the origin IP address for this chunk? */
sockaddr_storage_t source;
/* Destination address for this chunk. */
sockaddr_storage_t dest;
/* For an inbound chunk, this tells us where it came from.
* For an outbound chunk, it tells us where we'd like it to
......@@ -492,7 +497,7 @@ void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data);
int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data);
sctp_chunk_t *sctp_chunkify(struct sk_buff *, const sctp_association_t *,
struct sock *);
void sctp_init_source(sctp_chunk_t *chunk);
void sctp_init_addrs(sctp_chunk_t *chunk);
const sockaddr_storage_t *sctp_source(const sctp_chunk_t *chunk);
/* This is a structure for holding either an IPv6 or an IPv4 address. */
......@@ -655,6 +660,9 @@ struct SCTP_transport {
/* PMTU : The current known path MTU. */
__u32 pmtu;
/* Destination */
struct dst_entry *dst;
/* When was the last time(in jiffies) that a data packet was sent on
* this transport? This is used to adjust the cwnd when the transport
* becomes inactive.
......@@ -735,6 +743,7 @@ extern sctp_transport_t *sctp_transport_new(const sockaddr_storage_t *, int);
extern sctp_transport_t *sctp_transport_init(sctp_transport_t *,
const sockaddr_storage_t *, int);
extern void sctp_transport_set_owner(sctp_transport_t *, sctp_association_t *);
extern void sctp_transport_route(sctp_transport_t *, sockaddr_storage_t *);
extern void sctp_transport_free(sctp_transport_t *);
extern void sctp_transport_destroy(sctp_transport_t *);
extern void sctp_transport_reset_timers(sctp_transport_t *);
......
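Taken together, the structs.h changes replace the one-shot get_dst_mtu() callback with a cached dst_entry per transport plus two address-family ops, get_dst() and cmp_saddr(). A condensed sketch of the intended calling pattern follows; the real logic lives in sctp_transport_route() later in this diff, and the helper name here is made up.

/* Illustrative helper: fetch and cache a route for a transport, then derive
 * the path MTU from it, falling back to the SCTP default when routing fails.
 */
static void cache_route(sctp_transport_t *t, sockaddr_storage_t *saddr)
{
	struct dst_entry *dst = t->af_specific->get_dst(&t->ipaddr, saddr);

	t->dst = dst;
	t->pmtu = dst ? dst_pmtu(dst) : SCTP_DEFAULT_MAXSEGMENT;
}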
......@@ -189,6 +189,35 @@ struct udp_mib
unsigned long __pad[0];
} ____cacheline_aligned;
/* draft-ietf-sigtran-sctp-mib-07.txt */
struct sctp_mib
{
unsigned long SctpCurrEstab;
unsigned long SctpActiveEstabs;
unsigned long SctpPassiveEstabs;
unsigned long SctpAborteds;
unsigned long SctpShutdowns;
unsigned long SctpOutOfBlues;
unsigned long SctpChecksumErrors;
unsigned long SctpOutCtrlChunks;
unsigned long SctpOutOrderChunks;
unsigned long SctpOutUnorderChunks;
unsigned long SctpInCtrlChunks;
unsigned long SctpInOrderChunks;
unsigned long SctpInUnorderChunks;
unsigned long SctpFragUsrMsgs;
unsigned long SctpReasmUsrMsgs;
unsigned long SctpOutSCTPPacks;
unsigned long SctpInSCTPPacks;
unsigned long SctpRtoAlgorithm;
unsigned long SctpRtoMin;
unsigned long SctpRtoMax;
unsigned long SctpRtoInitial;
unsigned long SctpValCookieLife;
unsigned long SctpMaxInitRetr;
unsigned long __pad[0];
} ____cacheline_aligned;
struct linux_mib
{
unsigned long SyncookiesSent;
......
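The new struct sctp_mib mirrors the counter set of draft-ietf-sigtran-sctp-mib-07. When these counters are eventually exported, for example through a /proc file, the usual kernel pattern is a table of (name, offset) pairs walked at read time. A small user-space model of that pattern follows; the struct is trimmed and the names are illustrative.

/* Walk a MIB struct by (name, offset) pairs, the way /proc dumps usually do. */
#include <stddef.h>
#include <stdio.h>

struct sctp_mib_model {
	unsigned long SctpCurrEstab;
	unsigned long SctpActiveEstabs;
	unsigned long SctpPassiveEstabs;
};

static const struct { const char *name; size_t offset; } sctp_mib_list[] = {
	{ "SctpCurrEstab",     offsetof(struct sctp_mib_model, SctpCurrEstab) },
	{ "SctpActiveEstabs",  offsetof(struct sctp_mib_model, SctpActiveEstabs) },
	{ "SctpPassiveEstabs", offsetof(struct sctp_mib_model, SctpPassiveEstabs) },
};

int main(void)
{
	struct sctp_mib_model m = { .SctpActiveEstabs = 3 };

	for (size_t i = 0; i < sizeof(sctp_mib_list) / sizeof(sctp_mib_list[0]); i++)
		printf("%-20s %lu\n", sctp_mib_list[i].name,
		       *(unsigned long *)((char *)&m + sctp_mib_list[i].offset));
	return 0;
}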
......@@ -208,7 +208,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->highest_sacked = asoc->ctsn_ack_point;
asoc->last_cwr_tsn = asoc->ctsn_ack_point;
asoc->unack_data = 0;
SCTP_DEBUG_PRINTK("myctsnap for %s INIT as 0x%x.\n",
......@@ -404,6 +404,9 @@ sctp_transport_t *sctp_assoc_add_peer(sctp_association_t *asoc,
sctp_transport_set_owner(peer, asoc);
/* Cache a route for the transport. */
sctp_transport_route(peer, NULL);
/* If this is the first transport addr on this association,
* initialize the association PMTU to the peer's PMTU.
* If not and the current association PMTU is higher than the new
......
......@@ -41,6 +41,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -217,8 +218,8 @@ int sctp_rcv(struct sk_buff *skb)
/* Remember the SCTP header. */
chunk->sctp_hdr = sh;
/* Set the source address. */
sctp_init_source(chunk);
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk);
/* Remember where we came from. */
chunk->transport = transport;
......
......@@ -158,35 +158,60 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
}
#endif /* TEST_FRAME */
/* Returns the mtu for the given v6 destination address. */
int sctp_v6_get_dst_mtu(const sockaddr_storage_t *address)
/* FIXME: This macro needs to be moved to a common header file. */
#define NIP6(addr) \
ntohs((addr)->s6_addr16[0]), \
ntohs((addr)->s6_addr16[1]), \
ntohs((addr)->s6_addr16[2]), \
ntohs((addr)->s6_addr16[3]), \
ntohs((addr)->s6_addr16[4]), \
ntohs((addr)->s6_addr16[5]), \
ntohs((addr)->s6_addr16[6]), \
ntohs((addr)->s6_addr16[7])
/* Returns the dst cache entry for the given source and destination ip
* addresses.
*/
struct dst_entry *sctp_v6_get_dst(sockaddr_storage_t *daddr,
sockaddr_storage_t *saddr)
{
struct dst_entry *dst;
struct flowi fl;
int dst_mtu = SCTP_DEFAULT_MAXSEGMENT;
struct flowi fl = { .nl_u = { .ip6_u = { .daddr = &daddr->v6.sin6_addr,
} } };
fl.proto = 0;
fl.fl6_dst = (struct in6_addr *)&address->v6.sin6_addr;
fl.fl6_src = NULL;
fl.fl6_flowlabel = 0;
fl.oif = 0;
fl.uli_u.ports.sport = 0;
fl.uli_u.ports.dport = 0;
SCTP_DEBUG_PRINTK("%s: DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
__FUNCTION__, NIP6(fl.fl6_dst));
if (saddr) {
fl.fl6_src = &saddr->v6.sin6_addr;
SCTP_DEBUG_PRINTK(
"SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x - ",
NIP6(fl.fl6_src));
}
dst = ip6_route_output(NULL, &fl);
if (dst) {
dst_mtu = dst_pmtu(dst);
SCTP_DEBUG_PRINTK("sctp_v6_get_dst_mtu: "
"ip6_route_output: dev:%s pmtu:%d\n",
dst->dev->name, dst_mtu);
dst_release(dst);
struct rt6_info *rt;
rt = (struct rt6_info *)dst;
SCTP_DEBUG_PRINTK(
"rt6_dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
"rt6_src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
NIP6(&rt->rt6i_dst.addr), NIP6(&rt->rt6i_src.addr));
} else {
SCTP_DEBUG_PRINTK("sctp_v6_get_dst_mtu: "
"ip6_route_output failed, returning "
"%d as dst_mtu\n", dst_mtu);
SCTP_DEBUG_PRINTK("NO ROUTE\n");
return NULL;
}
return dst_mtu;
return dst;
}
/* Check if the dst entry's source addr matches the given source addr. */
int sctp_v6_cmp_saddr(struct dst_entry *dst, sockaddr_storage_t *saddr)
{
struct rt6_info *rt = (struct rt6_info *)dst;
return ipv6_addr_cmp(&rt->rt6i_src.addr, &saddr->v6.sin6_addr);
}
/* Initialize a PF_INET6 socket msg_name. */
......@@ -301,7 +326,8 @@ static sctp_func_t sctp_ipv6_specific = {
.queue_xmit = sctp_v6_xmit,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.get_dst_mtu = sctp_v6_get_dst_mtu,
.get_dst = sctp_v6_get_dst,
.cmp_saddr = sctp_v6_cmp_saddr,
.net_header_len = sizeof(struct ipv6hdr),
.sockaddr_len = sizeof(struct sockaddr_in6),
.sa_family = AF_INET6,
......
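The NIP6() macro above just expands an in6_addr into eight zero-padded 16-bit hex groups for the debug printks. A stand-alone user-space illustration of the same formatting follows; it is not the kernel macro and deliberately uses the portable s6_addr byte view instead of s6_addr16.

/* Print an IPv6 address as eight zero-padded 16-bit hex groups,
 * e.g. 2001:0db8:0000:0000:0000:0000:0000:0001.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void print_nip6(const struct in6_addr *a)
{
	for (int i = 0; i < 8; i++)
		printf("%04x%s",
		       (a->s6_addr[2 * i] << 8) | a->s6_addr[2 * i + 1],
		       i < 7 ? ":" : "\n");
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "2001:db8::1", &a);
	print_nip6(&a);
	return 0;
}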
......@@ -245,6 +245,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
int err = 0;
int padding; /* How much padding do we need? */
__u8 packet_has_data = 0;
struct dst_entry *dst;
/* Do NOT generate a chunkless packet... */
if (skb_queue_empty(&packet->chunks))
......@@ -410,13 +411,6 @@ int sctp_packet_transmit(sctp_packet_t *packet)
asoc->peer.last_sent_to = transport;
}
/* Hey, before Linux changes, here's what we have to
* do to force IP routing to recognize the change of
* dest addr. --xguo
*/
if (sk->dst_cache)
sk->dst_cache->obsolete = 1;
if (packet_has_data) {
struct timer_list *timer;
unsigned long timeout;
......@@ -434,6 +428,13 @@ int sctp_packet_transmit(sctp_packet_t *packet)
}
}
dst = transport->dst;
if (!dst || dst->obsolete) {
sctp_transport_route(transport, NULL);
}
nskb->dst = dst_clone(transport->dst);
SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb length %d\n",
nskb->len);
(*transport->af_specific->queue_xmit)(nskb);
......
......@@ -204,18 +204,43 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
return error;
}
/* Insert a chunk into the retransmit queue. Chunks on the retransmit
* queue are kept in order, based on the TSNs.
*/
void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
{
struct list_head *rlchunk;
sctp_chunk_t *tchunk, *rchunk;
__u32 ttsn, rtsn;
int done = 0;
tchunk = list_entry(tlchunk, sctp_chunk_t, transmitted_list);
ttsn = ntohl(tchunk->subh.data_hdr->tsn);
list_for_each(rlchunk, &q->retransmit) {
rchunk = list_entry(rlchunk, sctp_chunk_t, transmitted_list);
rtsn = ntohl(rchunk->subh.data_hdr->tsn);
if (TSN_lt(ttsn, rtsn)) {
list_add(tlchunk, rlchunk->prev);
done = 1;
break;
}
}
if (!done) {
list_add_tail(tlchunk, &q->retransmit);
}
}
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
__u8 fast_retransmit)
{
struct list_head *lchunk;
struct list_head *lchunk, *ltemp;
sctp_chunk_t *chunk;
struct list_head tlist;
INIT_LIST_HEAD(&tlist);
while (!list_empty(&transport->transmitted)) {
lchunk = sctp_list_dequeue(&transport->transmitted);
/* Walk through the specified transmitted queue. */
list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
/* If we are doing retransmission due to a fast retransmit,
......@@ -224,10 +249,8 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
* retransmission due to a timeout, only the chunks that are
* not yet acked should be added to the retransmit queue.
*/
if ((fast_retransmit && !chunk->fast_retransmit) ||
(!fast_retransmit && chunk->tsn_gap_acked)) {
list_add_tail(lchunk, &tlist);
} else {
if ((fast_retransmit && chunk->fast_retransmit) ||
(!fast_retransmit && !chunk->tsn_gap_acked)) {
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
......@@ -257,16 +280,15 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
chunk->rtt_in_progress = 0;
transport->rto_pending = 0;
}
list_add_tail(lchunk, &q->retransmit);
/* Move the chunk to the retransmit queue. The chunks
* on the retransmit queue are always kept in order.
*/
list_del(lchunk);
sctp_retransmit_insert(lchunk, q);
}
}
/* Reconstruct the transmitted queue with chunks that are not
* eligible for retransmission.
*/
while (NULL != (lchunk = sctp_list_dequeue(&tlist)))
list_add_tail(lchunk, &transport->transmitted);
SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
"cwnd: %d, ssthresh: %d, flight_size: %d, "
"pba: %d\n", __FUNCTION__,
......
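The ordered insert in sctp_retransmit_insert() relies on TSN_lt() doing a wrap-safe serial-number comparison, so the retransmit queue stays sorted even when TSNs wrap past 2^32. A user-space sketch of that comparison follows, assuming TSN_lt() is the usual signed-difference test used elsewhere in the stack.

/* Wrap-safe "less than" for 32-bit TSNs: the signed difference is negative
 * exactly when a precedes b in serial-number order.
 */
#include <stdint.h>
#include <stdio.h>

static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", tsn_lt(5, 10));		/* 1 */
	printf("%d\n", tsn_lt(0xfffffff0u, 3));	/* 1: wrapped, still earlier */
	printf("%d\n", tsn_lt(10, 5));		/* 0 */
	return 0;
}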
......@@ -59,6 +59,7 @@
/* Global data structures. */
sctp_protocol_t sctp_proto;
struct proc_dir_entry *proc_net_sctp;
struct sctp_mib sctp_statistics[NR_CPUS * 2];
/* This is the global socket data structure used for responding to
* the Out-of-the-blue (OOTB) packets. A control sock will be created
......@@ -255,27 +256,41 @@ int sctp_copy_local_addr_list(sctp_protocol_t *proto, sctp_bind_addr_t *bp,
return error;
}
/* Returns the mtu for the given v4 destination address. */
int sctp_v4_get_dst_mtu(const sockaddr_storage_t *address)
/* Returns the dst cache entry for the given source and destination ip
* addresses.
*/
struct dst_entry *sctp_v4_get_dst(sockaddr_storage_t *daddr,
sockaddr_storage_t *saddr)
{
int dst_mtu = SCTP_DEFAULT_MAXSEGMENT;
struct rtable *rt;
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = address->v4.sin_addr.s_addr } } };
struct flowi fl = { .nl_u = { .ip4_u = { .daddr =
daddr->v4.sin_addr.s_addr,
} } };
if (saddr)
fl.fl4_src = saddr->v4.sin_addr.s_addr;
SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ",
__FUNCTION__, NIPQUAD(fl.fl4_dst),
NIPQUAD(fl.fl4_src));
if (ip_route_output_key(&rt, &fl)) {
SCTP_DEBUG_PRINTK("sctp_v4_get_dst_mtu:ip_route_output_key"
" failed, returning %d as dst_mtu\n",
dst_mtu);
} else {
dst_mtu = dst_pmtu(&rt->u.dst);
SCTP_DEBUG_PRINTK("sctp_v4_get_dst_mtu: "
"ip_route_output_key: dev:%s pmtu:%d\n",
rt->u.dst.dev->name, dst_mtu);
ip_rt_put(rt);
SCTP_DEBUG_PRINTK("NO ROUTE\n");
return NULL;
}
return dst_mtu;
SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
NIPQUAD(rt->rt_src), NIPQUAD(rt->rt_dst));
return &rt->u.dst;
}
/* Check if the dst entry's source addr matches the given source addr. */
int sctp_v4_cmp_saddr(struct dst_entry *dst, sockaddr_storage_t *saddr)
{
struct rtable *rt = (struct rtable *)dst;
return (rt->rt_src == saddr->v4.sin_addr.s_addr);
}
/* Event handler for inet device events.
......@@ -437,7 +452,8 @@ sctp_func_t sctp_ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.get_dst_mtu = sctp_v4_get_dst_mtu,
.get_dst = sctp_v4_get_dst,
.cmp_saddr = sctp_v4_cmp_saddr,
.net_header_len = sizeof(struct iphdr),
.sockaddr_len = sizeof(struct sockaddr_in),
.sa_family = AF_INET,
......
......@@ -47,6 +47,7 @@
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -838,6 +839,53 @@ sctp_chunk_t *sctp_make_abort_no_data(const sctp_association_t *asoc,
return retval;
}
/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *asoc,
const sctp_chunk_t *chunk,
const struct msghdr *msg)
{
sctp_chunk_t *retval;
void *payload = NULL, *payoff;
size_t paylen;
struct iovec *iov = msg->msg_iov;
int iovlen = msg->msg_iovlen;
paylen = get_user_iov_size(iov, iovlen);
retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen);
if (!retval)
goto err_chunk;
if (paylen) {
/* Put the msg_iov together into payload. */
payload = kmalloc(paylen, GFP_ATOMIC);
if (!payload)
goto err_payload;
payoff = payload;
for (; iovlen > 0; --iovlen) {
if (copy_from_user(payoff, iov->iov_base, iov->iov_len))
goto err_copy;
payoff += iov->iov_len;
iov++;
}
}
sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
if (paylen)
kfree(payload);
return retval;
err_copy:
kfree(payload);
err_payload:
sctp_free_chunk(retval);
retval = NULL;
err_chunk:
return retval;
}
/* Make a HEARTBEAT chunk. */
sctp_chunk_t *sctp_make_heartbeat(const sctp_association_t *asoc,
const sctp_transport_t *transport,
......@@ -982,16 +1030,17 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
return retval;
}
/* Set chunk->source based on the IP header in chunk->skb. */
void sctp_init_source(sctp_chunk_t *chunk)
/* Set chunk->source and dest based on the IP header in chunk->skb. */
void sctp_init_addrs(sctp_chunk_t *chunk)
{
sockaddr_storage_t *source;
sockaddr_storage_t *source, *dest;
struct sk_buff *skb;
struct sctphdr *sh;
struct iphdr *ih4;
struct ipv6hdr *ih6;
source = &chunk->source;
dest = &chunk->dest;
skb = chunk->skb;
ih4 = skb->nh.iph;
ih6 = skb->nh.ipv6h;
......@@ -1002,6 +1051,9 @@ void sctp_init_source(sctp_chunk_t *chunk)
source->v4.sin_family = AF_INET;
source->v4.sin_port = ntohs(sh->source);
source->v4.sin_addr.s_addr = ih4->saddr;
dest->v4.sin_family = AF_INET;
dest->v4.sin_port = ntohs(sh->dest);
dest->v4.sin_addr.s_addr = ih4->daddr;
break;
case 6:
......@@ -1009,6 +1061,9 @@ void sctp_init_source(sctp_chunk_t *chunk)
source->v6.sin6_family = AF_INET6;
source->v6.sin6_port = ntohs(sh->source);
source->v6.sin6_addr = ih6->saddr;
dest->v6.sin6_family = AF_INET6;
dest->v6.sin6_port = ntohs(sh->dest);
dest->v6.sin6_addr = ih6->daddr;
/* FIXME: What do we do with scope, etc. ? */
break;
)
......
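sctp_make_abort_user() above sizes the error cause from the whole iovec and then flattens msg_iov into one contiguous payload before handing it to sctp_init_cause(). A user-space model of that flattening step follows; the helper name is made up and the copy is a plain memcpy() rather than copy_from_user().

/* Compute the total iovec length, then copy each segment into one buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

static void *flatten_iov(const struct iovec *iov, int iovlen, size_t *lenp)
{
	size_t total = 0;
	char *buf, *p;

	for (int i = 0; i < iovlen; i++)
		total += iov[i].iov_len;

	buf = malloc(total ? total : 1);
	if (!buf)
		return NULL;

	p = buf;
	for (int i = 0; i < iovlen; i++) {
		memcpy(p, iov[i].iov_base, iov[i].iov_len);
		p += iov[i].iov_len;
	}
	*lenp = total;
	return buf;
}

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = "user ", .iov_len = 5 },
		{ .iov_base = "abort", .iov_len = 5 },
	};
	size_t len;
	char *payload = flatten_iov(iov, 2, &len);

	printf("%.*s (%zu bytes)\n", (int)len, payload, len);
	free(payload);
	return 0;
}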
......@@ -66,7 +66,8 @@ static void sctp_do_ecn_cwr_work(sctp_association_t *asoc,
static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
sctp_transport_t *transport);
static void sctp_cmd_init_failed(sctp_cmd_seq_t *, sctp_association_t *asoc);
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *, sctp_association_t *asoc);
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *, sctp_association_t *asoc,
sctp_event_t event_type, sctp_chunk_t *chunk);
static void sctp_cmd_process_init(sctp_cmd_seq_t *, sctp_association_t *asoc,
sctp_chunk_t *chunk,
sctp_init_chunk_t *peer_init,
......@@ -251,7 +252,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
int force;
sctp_cmd_t *command;
sctp_chunk_t *new_obj;
sctp_chunk_t *chunk;
sctp_chunk_t *chunk = NULL;
sctp_packet_t *packet;
struct list_head *pos;
struct timer_list *timer;
......@@ -259,7 +260,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_transport_t *t;
sctp_sackhdr_t sackh;
chunk = (sctp_chunk_t *) event_arg;
if(SCTP_EVENT_T_TIMEOUT != event_type)
chunk = (sctp_chunk_t *) event_arg;
/* Note: This whole file is a huge candidate for rework.
* For example, each command could either have its own handler, so
......@@ -504,7 +506,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc);
sctp_cmd_assoc_failed(commands, asoc, event_type,
chunk);
break;
case SCTP_CMD_COUNTER_INC:
......@@ -595,10 +598,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
/*
* Save the TSN away for comparison when we receive CWR
* Note: dp->TSN is expected in host endian
*/
/* Save the TSN away for comparison when we receive CWR */
asoc->last_ecne_tsn = lowest_tsn;
asoc->need_ecne = 1;
......@@ -621,7 +621,6 @@ static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
sctp_chunk_t *chunk)
{
sctp_chunk_t *repl;
sctp_transport_t *transport;
/* Our previously transmitted packet ran into some congestion
* so we should take action by reducing cwnd and ssthresh
......@@ -629,43 +628,28 @@ static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
* sending a CWR.
*/
/* Find which transport's congestion variables
* need to be adjusted.
/* First, try to determine if we want to actually lower
* our cwnd variables. Only lower them if the ECNE looks more
* recent than the last response.
*/
if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
sctp_transport_t *transport;
transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
/* Find which transport's congestion variables
* need to be adjusted.
*/
transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
/* Update the congestion variables. */
if (transport)
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_ECNE);
/* Update the congestion variables. */
if (transport)
sctp_transport_lower_cwnd(transport,
SCTP_LOWER_CWND_ECNE);
asoc->last_cwr_tsn = lowest_tsn;
}
/* Save away a rough idea of when we last sent out a CWR.
* We compare against this value (see above) to decide if
* this is a fairly new request.
* Note that this is not a perfect solution. We may
* have moved beyond the window (several times) by the
* next time we get an ECNE. However, it is cute. This idea
* came from Randy's reference code.
*
* Here's what RFC 2960 has to say about CWR. This is NOT
* what we do.
*
* RFC 2960 Appendix A
*
* CWR:
*
* RFC 2481 details a specific bit for a sender to send in
* the header of its next outbound TCP segment to indicate
* to its peer that it has reduced its congestion window.
* This is termed the CWR bit. For SCTP the same
* indication is made by including the CWR chunk. This
* chunk contains one data element, i.e. the TSN number
* that was sent in the ECNE chunk. This element
* represents the lowest TSN number in the datagram that
* was originally marked with the CE bit.
/* Always try to quiet the other end. In case of lost CWR,
* resend last_cwr_tsn.
*/
asoc->last_cwr_tsn = asoc->next_tsn - 1;
repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
/* If we run out of memory, it will look like a lost CWR. We'll
......@@ -1038,14 +1022,26 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_association_t *asoc)
sctp_association_t *asoc,
sctp_event_t event_type,
sctp_chunk_t *chunk)
{
sctp_ulpevent_t *event;
__u16 error = 0;
if (event_type == SCTP_EVENT_T_PRIMITIVE)
error = SCTP_ERROR_USER_ABORT;
if (chunk && (SCTP_CID_ABORT == chunk->chunk_hdr->type) &&
(ntohs(chunk->chunk_hdr->length) >= (sizeof(struct sctp_chunkhdr) +
sizeof(struct sctp_errhdr)))) {
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
}
event = sctp_ulpevent_make_assoc_change(asoc,
0,
SCTP_COMM_LOST,
0, 0, 0,
error, 0, 0,
GFP_ATOMIC);
if (event)
......
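The new code in sctp_cmd_assoc_failed() only trusts the ABORT's first error cause when the chunk is long enough to hold a chunk header plus an error cause header. A user-space sketch of that bounds-checked parse follows; the structures are simplified stand-ins for sctp_chunkhdr_t and sctp_errhdr_t.

/* Pull the first cause code out of a serialized ABORT chunk, refusing to
 * read past the buffer or past the chunk's own declared length.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct chunkhdr { uint8_t type; uint8_t flags; uint16_t length; };
struct errhdr { uint16_t cause; uint16_t length; };

static int abort_first_cause(const uint8_t *buf, size_t len, uint16_t *cause)
{
	struct chunkhdr ch;
	struct errhdr eh;

	if (len < sizeof(ch) + sizeof(eh))
		return -1;
	memcpy(&ch, buf, sizeof(ch));
	if (ntohs(ch.length) < sizeof(ch) + sizeof(eh))
		return -1;			/* ABORT carries no cause */
	memcpy(&eh, buf + sizeof(ch), sizeof(eh));
	*cause = ntohs(eh.cause);
	return 0;
}

int main(void)
{
	/* ABORT (type 6), length 8, cause 0x000c (user-initiated abort). */
	uint8_t abort_chunk[] = { 0x06, 0x00, 0x00, 0x08,
				  0x00, 0x0c, 0x00, 0x04 };
	uint16_t cause;

	if (!abort_first_cause(abort_chunk, sizeof(abort_chunk), &cause))
		printf("cause = 0x%04x\n", cause);
	return 0;
}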
......@@ -43,6 +43,7 @@
* Hui Huang <hui.huang@nokia.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -865,9 +866,9 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const sctp_endpoint_t *ep,
/* Helper function to send out an abort for the restart
* condition.
*/
static int sctp_sf_send_restart_abort(sockaddr_storage_t *ssa,
sctp_chunk_t *init,
sctp_cmd_seq_t *commands)
{
int len;
sctp_packet_t *pkt;
......@@ -881,7 +882,7 @@ static int sctp_sf_send_restart_abort(sockaddr_storage_t *ssa,
*/
errhdr = (sctp_errhdr_t *)buffer;
addrparm = (sctp_addr_param_t *)errhdr->variable;
/* Copy into a parm format. */
len = sockaddr2sctp_addr(ssa, addrparm);
len += sizeof(sctp_errhdr_t);
......@@ -897,7 +898,7 @@ static int sctp_sf_send_restart_abort(sockaddr_storage_t *ssa,
*/
pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
if (!pkt)
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
......@@ -906,18 +907,18 @@ static int sctp_sf_send_restart_abort(sockaddr_storage_t *ssa,
out:
/* Even if there is no memory, treat as a failure so
* the packet will get dropped.
*/
return 0;
}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
static int sctp_sf_check_restart_addrs(const sctp_association_t *new_asoc,
const sctp_association_t *asoc,
sctp_chunk_t *init,
sctp_cmd_seq_t *commands)
{
sctp_transport_t *new_addr, *addr;
struct list_head *pos, *pos2;
......@@ -956,8 +957,8 @@ static int sctp_sf_check_restart_addrs(const sctp_association_t *new_asoc,
if (!found && new_addr) {
sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
}
/* Return success if all addresses were found. */
return found;
}
......@@ -1053,7 +1054,7 @@ static char sctp_tietags_compare(sctp_association_t *new_asoc,
*/
static sctp_disposition_t sctp_sf_do_unexpected_init(
const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
......@@ -1130,10 +1131,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
* since there are no peer addresses to check against.
* Upon return an ABORT will have been sent if needed.
*/
if (asoc->state != SCTP_STATE_COOKIE_WAIT) {
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
commands)) {
retval = SCTP_DISPOSITION_CONSUME;
goto cleanup_asoc;
......@@ -1333,9 +1334,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
* since you'd have to get inside the cookie.
*/
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
printk("cookie echo check\n");
return SCTP_DISPOSITION_CONSUME;
}
/* For now, fail any unsent/unacked data. Consider the optional
* choice of resending of this data.
......@@ -1542,7 +1543,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const sctp_endpoint_t *ep,
* are in good shape.
*/
chunk->subh.cookie_hdr = (sctp_signed_cookie_t *)chunk->skb->data;
skb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
......@@ -2098,18 +2099,11 @@ sctp_disposition_t sctp_sf_do_ecne(const sctp_endpoint_t *ep,
ecne = (sctp_ecnehdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t));
ecne->lowest_tsn = ntohl(ecne->lowest_tsn);
/* Casting away the const, as we are just modifying the spinlock,
* not the association itself. This should go away in the near
* future when we move to an endpoint based lock.
*/
/* If this is a newer ECNE than the last CWR packet we sent out */
if (TSN_lt(asoc->last_cwr_tsn, ecne->lowest_tsn)) {
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ecne->lowest_tsn));
}
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ntohl(ecne->lowest_tsn)));
return SCTP_DISPOSITION_CONSUME;
}
......@@ -2641,7 +2635,7 @@ sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
sctp_ulpevent_t *ev;
while (chunk->chunk_end > chunk->skb->data) {
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
GFP_ATOMIC);
if (!ev)
goto nomem;
......@@ -3401,13 +3395,14 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
struct msghdr *msg = arg;
sctp_chunk_t *abort;
sctp_disposition_t retval;
retval = SCTP_DISPOSITION_CONSUME;
/* Generate ABORT chunk to send the peer. */
abort = sctp_make_abort(asoc, NULL, 0);
abort = sctp_make_abort_user(asoc, NULL, msg);
if (!abort)
retval = SCTP_DISPOSITION_NOMEM;
else
......@@ -3525,6 +3520,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
struct msghdr *msg = arg;
sctp_chunk_t *abort;
sctp_disposition_t retval;
......@@ -3534,7 +3530,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
retval = SCTP_DISPOSITION_CONSUME;
/* Generate ABORT chunk to send the peer */
abort = sctp_make_abort(asoc, NULL, 0);
abort = sctp_make_abort_user(asoc, NULL, msg);
if (!abort)
retval = SCTP_DISPOSITION_NOMEM;
else
......@@ -4258,6 +4254,11 @@ sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
if (!packet)
goto nomem_packet;
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (sockaddr_storage_t *)&chunk->dest);
packet = sctp_packet_init(packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0, NULL);
......
......@@ -804,10 +804,12 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
SCTP_DEBUG_PRINTK("msg_len: %Zd, sinfo_flags: 0x%x\n",
msg_len, sinfo_flags);
/* If MSG_EOF|MSG_ABORT is set, no data can be sent. Disallow
* sending 0-length messages when MSG_EOF|MSG_ABORT is not set.
*/
if (((sinfo_flags & (MSG_EOF|MSG_ABORT)) && (msg_len > 0)) ||
/* If MSG_EOF is set, no data can be sent. Disallow sending zero
* length messages when MSG_EOF|MSG_ABORT is not set.
* If MSG_ABORT is set, the message length could be non zero with
* the msg_iov set to the user abort reason.
*/
if (((sinfo_flags & MSG_EOF) && (msg_len > 0)) ||
(!(sinfo_flags & (MSG_EOF|MSG_ABORT)) && (msg_len == 0))) {
err = -EINVAL;
goto out_nounlock;
......@@ -879,7 +881,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (sinfo_flags & MSG_ABORT) {
SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
sctp_primitive_ABORT(asoc, NULL);
sctp_primitive_ABORT(asoc, msg);
err = 0;
goto out_unlock;
}
......@@ -1238,6 +1240,9 @@ static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
{
sctp_opt_t *sp = sctp_sk(sk);
/* Applicable to UDP-style socket only */
if (SCTP_SOCKET_TCP == sp->type)
return -EOPNOTSUPP;
if (optlen != sizeof(int))
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
......@@ -1593,6 +1598,9 @@ static inline int sctp_getsockopt_set_events(struct sock *sk, int len, char *opt
static inline int sctp_getsockopt_autoclose(struct sock *sk, int len, char *optval, int *optlen)
{
/* Applicable to UDP-style socket only */
if (SCTP_SOCKET_TCP == sctp_sk(sk)->type)
return -EOPNOTSUPP;
if (len != sizeof(int))
return -EINVAL;
if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
......@@ -1614,10 +1622,10 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
int err = 0;
/* An association cannot be branched off from an already peeled-off
* socket.
* socket, nor is this supported for tcp style sockets.
*/
if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sctp_sk(oldsk)->type)
return -EINVAL;
if (SCTP_SOCKET_UDP != sctp_sk(oldsk)->type)
return -EOPNOTSUPP;
/* Create a new socket. */
err = sock_create(PF_INET, SOCK_SEQPACKET, IPPROTO_SCTP, &tmpsock);
......
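With the socket.c change above, MSG_ABORT no longer forces a zero-length send: whatever the caller puts in msg_iov becomes the payload of the SCTP_ERROR_USER_ABORT cause built by sctp_make_abort_user(). A user-space sketch of exercising that follows; it assumes the lksctp-tools sctp_sendmsg() helper and the MSG_ABORT flag spelling used by this tree (later renamed SCTP_ABORT), so treat the constants as era-specific.

/* Abort an association and attach a human-readable reason; the reason bytes
 * travel in the ABORT chunk's user-initiated-abort error cause.
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int abort_with_reason(int sd, const char *reason)
{
	return sctp_sendmsg(sd, reason, strlen(reason),
			    NULL, 0,		/* use the association's peer */
			    0,			/* ppid */
			    MSG_ABORT,		/* sinfo_flags */
			    0, 0, 0);		/* stream, ttl, context */
}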
......@@ -87,7 +87,6 @@ sctp_transport_t *sctp_transport_init(sctp_transport_t *peer,
peer->ipaddr = *addr;
peer->af_specific = sctp_get_af_specific(addr);
peer->asoc = NULL;
peer->pmtu = peer->af_specific->get_dst_mtu(addr);
/* From 6.3.1 RTO Calculation:
*
......@@ -161,6 +160,7 @@ void sctp_transport_destroy(sctp_transport_t *transport)
if (transport->asoc)
sctp_association_put(transport->asoc);
dst_release(transport->dst);
kfree(transport);
SCTP_DBG_OBJCNT_DEC(transport);
}
......@@ -200,6 +200,78 @@ void sctp_transport_set_owner(sctp_transport_t *transport,
sctp_association_hold(asoc);
}
/* Caches the dst entry for a transport's destination address and an optional
* source address.
*/
void sctp_transport_route(sctp_transport_t *transport,
sockaddr_storage_t *saddr)
{
sctp_association_t *asoc = transport->asoc;
sctp_func_t *af = transport->af_specific;
sockaddr_storage_t *daddr = &transport->ipaddr;
sctp_bind_addr_t *bp;
rwlock_t *addr_lock;
struct sockaddr_storage_list *laddr;
struct list_head *pos;
struct dst_entry *dst;
dst = af->get_dst(daddr, saddr);
/* If there is no association or if a source address is passed,
* no more validation is required.
*/
if (!asoc || saddr)
goto out;
if (SCTP_STATE_ESTABLISHED == asoc->state) {
bp = &asoc->base.bind_addr;
addr_lock = &asoc->base.addr_lock;
} else {
bp = &asoc->ep->base.bind_addr;
addr_lock = &asoc->ep->base.addr_lock;
}
if (dst) {
/* Walk through the bind address list and look for a bind
* address that matches the source address of the returned dst.
*/
sctp_read_lock(addr_lock);
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sockaddr_storage_list,
list);
if (af->cmp_saddr(dst, &laddr->a))
goto out_unlock;
}
sctp_read_unlock(addr_lock);
/* None of the bound addresses match the source address of the
* dst. So release it.
*/
dst_release(dst);
}
/* Walk through the bind address list and try to get a dst that
* matches a bind address as the source address.
*/
sctp_read_lock(addr_lock);
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sockaddr_storage_list, list);
dst = af->get_dst(daddr, &laddr->a);
if (dst)
goto out_unlock;
}
out_unlock:
sctp_read_unlock(addr_lock);
out:
transport->dst = dst;
if (dst)
transport->pmtu = dst_pmtu(dst);
else
transport->pmtu = SCTP_DEFAULT_MAXSEGMENT;
}
/* Hold a reference to a transport. */
void sctp_transport_hold(sctp_transport_t *transport)
{
......