Commit 46b6a3a9 authored by Jon Grimm, committed by Sridhar Samudrala

[SCTP] Minor surgery on ulpevent & related cleanups.

sndrcvinfo.sinfo_cumtsn is a new field added by the latest (05) API I-D.
Remove unused fields in ulpevent, minimally to make room for
storing this new field.  But I'll clear out even more so I can
make room for the impending partial data delivery work.

See the changes to the comments in ulpqueue.c.
Many naming and typedef-removal cleanups.
parent 9d6d6cb3
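
The core of the surgery: struct sctp_ulpevent is slimmed down so that it sits entirely inside the skb's cb[] control buffer, the old malloced and parent fields go away, and two new helpers (sctp_event2skb() and sctp_skb2event()) convert between an event and the sk_buff it lives in. Below is a minimal, self-contained userspace sketch of that container_of idiom; mock_sk_buff, mock_ulpevent, skb2event() and event2skb() are illustrative stand-ins (and the 48-byte cb[] size is an assumption), not the kernel definitions, which appear in the diff that follows.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_sk_buff {
	char cb[48];		/* control buffer, as in struct sk_buff */
};

struct mock_ulpevent {
	void *asoc;		/* stands in for struct sctp_association * */
	int msg_flags;
};

/* Mirrors sctp_skb2event(): the event is just a cast of skb->cb. */
static struct mock_ulpevent *skb2event(struct mock_sk_buff *skb)
{
	return (struct mock_ulpevent *)skb->cb;
}

/* Mirrors sctp_event2skb(): recover the enclosing skb from the event. */
static struct mock_sk_buff *event2skb(struct mock_ulpevent *ev)
{
	return container_of((void *)ev, struct mock_sk_buff, cb);
}

int main(void)
{
	struct mock_sk_buff skb = { { 0 } };
	struct mock_ulpevent *ev = skb2event(&skb);

	ev->msg_flags = 1;
	/* Round-trip: the event pointer maps straight back to its skb. */
	printf("event maps back to its skb: %s\n",
	       event2skb(ev) == &skb ? "yes" : "no");
	return 0;
}
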
......@@ -115,7 +115,7 @@ typedef union {
struct sctp_transport *transport;
sctp_bind_addr_t *bp;
sctp_init_chunk_t *init;
sctp_ulpevent_t *ulpevent;
struct sctp_ulpevent *ulpevent;
sctp_packet_t *packet;
sctp_sackhdr_t *sackh;
} sctp_arg_t;
......@@ -163,7 +163,7 @@ SCTP_ARG_CONSTRUCTOR(ASOC, sctp_association_t *, asoc)
SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
SCTP_ARG_CONSTRUCTOR(BA, sctp_bind_addr_t *, bp)
SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init)
SCTP_ARG_CONSTRUCTOR(ULPEVENT, sctp_ulpevent_t *, ulpevent)
SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
SCTP_ARG_CONSTRUCTOR(PACKET, sctp_packet_t *, packet)
SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
......
......@@ -2,7 +2,7 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001-2002 International Business Machines Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -105,26 +105,25 @@ union sctp_addr {
/* Forward declarations for data structures. */
struct sctp_protocol;
struct SCTP_endpoint;
struct SCTP_association;
struct sctp_endpoint;
struct sctp_association;
struct sctp_transport;
struct SCTP_packet;
struct SCTP_chunk;
struct SCTP_inqueue;
struct sctp_packet;
struct sctp_chunk;
struct sctp_inq;
struct sctp_outq;
struct SCTP_bind_addr;
struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
struct sctp_ssnmap;
typedef struct sctp_protocol sctp_protocol_t;
typedef struct SCTP_endpoint sctp_endpoint_t;
typedef struct SCTP_association sctp_association_t;
typedef struct SCTP_packet sctp_packet_t;
typedef struct SCTP_chunk sctp_chunk_t;
typedef struct SCTP_inqueue sctp_inqueue_t;
typedef struct SCTP_bind_addr sctp_bind_addr_t;
typedef struct sctp_endpoint sctp_endpoint_t;
typedef struct sctp_association sctp_association_t;
typedef struct sctp_packet sctp_packet_t;
typedef struct sctp_chunk sctp_chunk_t;
typedef struct sctp_bind_addr sctp_bind_addr_t;
typedef struct sctp_opt sctp_opt_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
......@@ -289,7 +288,7 @@ int sctp_register_af(struct sctp_af *);
/* Protocol family functions. */
struct sctp_pf {
void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
void (*skb_msgname) (struct sk_buff *, char *, int *);
int (*af_supported) (sa_family_t);
int (*cmp_addr) (const union sctp_addr *,
......@@ -484,7 +483,7 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
* As a matter of convenience, we remember the SCTP common header for
* each chunk as well as a few other header pointers...
*/
struct SCTP_chunk {
struct sctp_chunk {
/* These first three elements MUST PRECISELY match the first
* three elements of struct sk_buff. This allows us to reuse
* all the skb_* queue management functions.
......@@ -594,7 +593,7 @@ typedef sctp_chunk_t *(sctp_packet_phandler_t)(sctp_association_t *);
/* This structure holds lists of chunks as we are assembling for
* transmission.
*/
struct SCTP_packet {
struct sctp_packet {
/* These are the SCTP header values (host order) for the packet. */
__u16 source_port;
__u16 destination_port;
......@@ -846,8 +845,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
* SCTP. We write packets to it and read chunks from it.
*/
struct SCTP_inqueue {
/* This is actually a queue of sctp_chunk_t each
struct sctp_inq {
/* This is actually a queue of sctp_chunk each
* containing a partially decoded packet.
*/
struct sk_buff_head in;
......@@ -864,13 +863,12 @@ struct SCTP_inqueue {
int malloced; /* Is this structure kfree()able? */
};
sctp_inqueue_t *sctp_inqueue_new(void);
void sctp_inqueue_init(sctp_inqueue_t *);
void sctp_inqueue_free(sctp_inqueue_t *);
void sctp_push_inqueue(sctp_inqueue_t *, sctp_chunk_t *packet);
sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *);
void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
void (*)(void *), void *);
struct sctp_inq *sctp_inq_new(void);
void sctp_inq_init(struct sctp_inq *);
void sctp_inq_free(struct sctp_inq *);
void sctp_inq_push(struct sctp_inq *, sctp_chunk_t *packet);
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
/* This is the structure we use to hold outbound chunks. You push
* chunks in and they automatically pop out the other end as bundled
......@@ -954,7 +952,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
/* These bind address data fields common between endpoints and associations */
struct SCTP_bind_addr {
struct sctp_bind_addr {
/* RFC 2960 12.1 Parameters necessary for the SCTP instance
*
......@@ -1043,7 +1041,7 @@ struct sctp_endpoint_common {
struct sock *sk;
/* This is where we receive inbound chunks. */
sctp_inqueue_t inqueue;
struct sctp_inq inqueue;
/* This substructure includes the defining parameters of the
* endpoint:
......@@ -1076,7 +1074,7 @@ struct sctp_endpoint_common {
* off one of these.
*/
struct SCTP_endpoint {
struct sctp_endpoint {
/* Common substructure for endpoint and association. */
sctp_endpoint_common_t base;
......@@ -1172,7 +1170,7 @@ __u32 sctp_generate_tsn(const sctp_endpoint_t *ep);
/* Here we have information about each individual association. */
struct SCTP_association {
struct sctp_association {
/* A base structure common to endpoint and association.
* In this context, it represents the associations's view
......
......@@ -46,26 +46,31 @@
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
*/
typedef struct sctp_ulpevent {
int malloced;
sctp_association_t *asoc;
struct sk_buff *parent;
struct sctp_ulpevent {
struct sctp_association *asoc;
struct sctp_sndrcvinfo sndrcvinfo;
int chunk_flags; /* Temp. until we get a new chunk_t */
int msg_flags;
} sctp_ulpevent_t;
};
/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
{
return container_of((void *)ev, struct sk_buff, cb);
}
sctp_ulpevent_t *sctp_ulpevent_new(int size, int msg_flags, int priority);
sctp_ulpevent_t *sctp_ulpevent_init(sctp_ulpevent_t *event, struct sk_buff *skb, int msg_flags);
void sctp_ulpevent_free(sctp_ulpevent_t *event);
/* Retrieve & cast the event sitting inside the skb. */
static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
{
return (struct sctp_ulpevent *)skb->cb;
}
int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event);
struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int priority);
struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(
const struct SCTP_association *asoc,
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
__u16 flags,
__u16 state,
__u16 error,
......@@ -73,44 +78,43 @@ sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(
__u16 inbound,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
const struct SCTP_association *asoc,
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const struct sctp_association *asoc,
const struct sockaddr_storage *aaddr,
int flags,
int state,
int error,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_remote_error(
const struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_send_failed(
const struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
__u32 error,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
const struct SCTP_association *asoc,
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
__u16 flags,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
struct sctp_chunk *chunk,
int priority);
void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
struct msghdr *msghdr);
__u16 sctp_ulpevent_get_notification_type(const sctp_ulpevent_t *event);
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
/* Given an event subscription, is this event enabled? */
static inline int sctp_ulpevent_is_enabled(const sctp_ulpevent_t *event,
static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
const struct sctp_event_subscribe *mask)
{
const char *amask = (const char *) mask;
......@@ -124,7 +128,6 @@ static inline int sctp_ulpevent_is_enabled(const sctp_ulpevent_t *event,
return enabled;
}
#endif /* __sctp_ulpevent_h__ */
......
......@@ -166,6 +166,7 @@ struct sctp_sndrcvinfo {
__u32 sinfo_context;
__u32 sinfo_timetolive;
__u32 sinfo_tsn;
__u32 sinfo_cumtsn;
sctp_assoc_t sinfo_assoc_id;
};
......
......@@ -241,8 +241,8 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->peer.sack_needed = 1;
/* Create an input queue. */
sctp_inqueue_init(&asoc->base.inqueue);
sctp_inqueue_set_th_handler(&asoc->base.inqueue,
sctp_inq_init(&asoc->base.inqueue);
sctp_inq_set_th_handler(&asoc->base.inqueue,
(void (*)(void *))sctp_assoc_bh_rcv,
asoc);
......@@ -311,7 +311,7 @@ void sctp_association_free(sctp_association_t *asoc)
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inqueue_free(&asoc->base.inqueue);
sctp_inq_free(&asoc->base.inqueue);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
......@@ -505,7 +505,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
struct sctp_transport *t = NULL;
struct sctp_transport *first;
struct sctp_transport *second;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct list_head *pos;
int spc_state = 0;
......@@ -776,7 +776,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sctp_endpoint_t *ep;
sctp_chunk_t *chunk;
struct sock *sk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
int state, subtype;
sctp_assoc_t associd = sctp_assoc2id(asoc);
int error = 0;
......@@ -786,7 +786,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sk = asoc->base.sk;
inqueue = &asoc->base.inqueue;
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
state = asoc->state;
subtype = chunk->chunk_hdr->type;
......
......@@ -105,10 +105,10 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
ep->base.malloced = 1;
/* Create an input queue. */
sctp_inqueue_init(&ep->base.inqueue);
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
sctp_inqueue_set_th_handler(&ep->base.inqueue,
sctp_inq_set_th_handler(&ep->base.inqueue,
(void (*)(void *))sctp_endpoint_bh_rcv,
ep);
......@@ -198,7 +198,7 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
sctp_unhash_endpoint(ep);
/* Cleanup the inqueue. */
sctp_inqueue_free(&ep->base.inqueue);
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
......@@ -333,7 +333,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
struct sock *sk;
struct sctp_transport *transport;
sctp_chunk_t *chunk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
sctp_subtype_t subtype;
sctp_state_t state;
int error = 0;
......@@ -345,7 +345,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
inqueue = &ep->base.inqueue;
sk = ep->base.sk;
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
subtype.chunk = chunk->chunk_hdr->type;
/* We might have grown an association since last we
......
......@@ -244,7 +244,7 @@ int sctp_rcv(struct sk_buff *skb)
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
sctp_chunk_t *chunk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
/* One day chunk will live inside the skb, but for
* now this works.
......@@ -252,7 +252,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
chunk = (sctp_chunk_t *) skb;
inqueue = &chunk->rcvr->inqueue;
sctp_push_inqueue(inqueue, chunk);
sctp_inq_push(inqueue, chunk);
return 0;
}
......
......@@ -47,8 +47,8 @@
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
/* Initialize an SCTP_inqueue. */
void sctp_inqueue_init(sctp_inqueue_t *queue)
/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
skb_queue_head_init(&queue->in);
queue->in_progress = NULL;
......@@ -59,21 +59,21 @@ void sctp_inqueue_init(sctp_inqueue_t *queue)
queue->malloced = 0;
}
/* Create an initialized SCTP_inqueue. */
sctp_inqueue_t *sctp_inqueue_new(void)
/* Create an initialized sctp_inq. */
struct sctp_inq *sctp_inq_new(void)
{
sctp_inqueue_t *retval;
struct sctp_inq *retval;
retval = t_new(sctp_inqueue_t, GFP_ATOMIC);
retval = t_new(struct sctp_inq, GFP_ATOMIC);
if (retval) {
sctp_inqueue_init(retval);
sctp_inq_init(retval);
retval->malloced = 1;
}
return retval;
}
/* Release the memory associated with an SCTP inqueue. */
void sctp_inqueue_free(sctp_inqueue_t *queue)
void sctp_inq_free(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
......@@ -96,7 +96,7 @@ void sctp_inqueue_free(sctp_inqueue_t *queue)
/* Put a new packet in an SCTP inqueue.
* We assume that packet->sctp_hdr is set and in host byte order.
*/
void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
void sctp_inq_push(struct sctp_inq *q, sctp_chunk_t *packet)
{
/* Directly call the packet handling routine. */
......@@ -114,7 +114,7 @@ void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
* WARNING: If you need to put the chunk on another queue, you need to
* make a shallow copy (clone) of it.
*/
sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
sctp_chunk_t *sctp_inq_pop(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
sctp_chunkhdr_t *ch = NULL;
......@@ -172,7 +172,7 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
chunk->end_of_packet = 1;
}
SCTP_DEBUG_PRINTK("+++sctp_pop_inqueue+++ chunk %p[%s],"
SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s],"
" length %d, skb->len %d\n",chunk,
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
......@@ -182,12 +182,12 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
/* Set a top-half handler.
*
* Originally, we the top-half handler was scheduled as a BH. We now
* call the handler directly in sctp_push_inqueue() at a time that
* call the handler directly in sctp_inq_push() at a time that
* we know we are lock safe.
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
void sctp_inqueue_set_th_handler(sctp_inqueue_t *q,
void sctp_inq_set_th_handler(struct sctp_inq *q,
void (*callback)(void *), void *arg)
{
INIT_WORK(&q->immediate, callback, arg);
......
......@@ -444,7 +444,7 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
}
/* Initialize a PF_INET msgname from a ulpevent. */
static void sctp_inet6_event_msgname(sctp_ulpevent_t *event, char *msgname,
static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addrlen)
{
struct sockaddr_in6 *sin6, *sin6from;
......
......@@ -124,7 +124,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
/* Add the address to the local list. */
addr = t_new(struct sockaddr_storage_list, GFP_ATOMIC);
if (addr) {
INIT_LIST_HEAD(&addr->list);
addr->a.v4.sin_family = AF_INET;
addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
......@@ -557,7 +556,7 @@ static void sctp_inet_msgname(char *msgname, int *addr_len)
}
/* Copy the primary address of the peer primary address as the msg_name. */
static void sctp_inet_event_msgname(sctp_ulpevent_t *event, char *msgname,
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addr_len)
{
struct sockaddr_in *sin, *sinfrom;
......
......@@ -245,7 +245,8 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
retval = NULL;
addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, priority);
addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len,
priority);
if (!addrs.v)
goto nomem_rawaddr;
......
......@@ -1014,7 +1014,7 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
sctp_association_t *asoc)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
event = sctp_ulpevent_make_assoc_change(asoc,
0,
......@@ -1041,7 +1041,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_subtype_t subtype,
sctp_chunk_t *chunk)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
__u16 error = 0;
switch(event_type) {
......
......@@ -102,7 +102,7 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* RFC 2960 6.10 Bundling
*
......@@ -504,7 +504,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc;
sctp_init_chunk_t *peer_init;
sctp_chunk_t *repl;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
int error = 0;
sctp_chunk_t *err_chk_p;
......@@ -636,7 +636,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* RFC 2960 5.1 Normal Establishment of an Association
*
......@@ -1355,7 +1355,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
......@@ -1421,7 +1421,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
......@@ -1503,7 +1503,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands,
sctp_association_t *new_asoc)
{
sctp_ulpevent_t *ev = NULL;
struct sctp_ulpevent *ev = NULL;
sctp_chunk_t *repl;
/* Clarification from Implementor's Guide:
......@@ -2726,7 +2726,7 @@ sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
while (chunk->chunk_end > chunk->skb->data) {
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
......@@ -2764,7 +2764,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
{
sctp_chunk_t *chunk = arg;
sctp_chunk_t *reply;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* 10.2 H) SHUTDOWN COMPLETE notification
*
......
......@@ -1110,7 +1110,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int len, int noblock, int flags, int *addr_len)
{
sctp_ulpevent_t *event = NULL;
struct sctp_ulpevent *event = NULL;
sctp_opt_t *sp = sctp_sk(sk);
struct sk_buff *skb;
int copied;
......@@ -1143,7 +1143,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_skb2event(skb);
if (err)
goto out_free;
......@@ -1777,7 +1777,7 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
sctp_opt_t *oldsp = sctp_sk(oldsk);
sctp_opt_t *newsp;
struct sk_buff *skb, *tmp;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
int err = 0;
/* An association cannot be branched off from an already peeled-off
......@@ -1811,7 +1811,7 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
event = (sctp_ulpevent_t *)skb->cb;
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
__skb_queue_tail(&newsk->receive_queue, skb);
......
......@@ -142,7 +142,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
if (event) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
skb_queue_tail(&temp, event->parent);
skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_ulpq_order(ulpq, event);
}
......@@ -172,19 +172,20 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
if (event->parent->list)
sctp_skb_list_tail(event->parent->list, &sk->receive_queue);
if (sctp_event2skb(event)->list)
sctp_skb_list_tail(sctp_event2skb(event)->list,
&sk->receive_queue);
else
skb_queue_tail(&sk->receive_queue, event->parent);
skb_queue_tail(&sk->receive_queue, sctp_event2skb(event));
wake_up_interruptible(sk->sleep);
return 1;
out_free:
if (event->parent->list)
skb_queue_purge(event->parent->list);
if (sctp_event2skb(event)->list)
skb_queue_purge(sctp_event2skb(event)->list);
else
kfree_skb(event->parent);
kfree_skb(sctp_event2skb(event));
return 0;
}
......@@ -202,7 +203,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (struct sctp_ulpevent *)pos->cb;
cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn))
......@@ -211,9 +212,10 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->reasm))
__skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
__skb_insert(sctp_event2skb(event), pos->prev, pos,
&ulpq->reasm);
else
__skb_queue_tail(&ulpq->reasm, event->parent);
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
......@@ -264,7 +266,7 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
pos = pnext;
} while (1);
event = (sctp_ulpevent_t *) f_frag->cb;
event = (struct sctp_ulpevent *) f_frag->cb;
return event;
}
......@@ -272,13 +274,13 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
sctp_ulpevent_t *retval = NULL;
struct sctp_ulpevent *retval = NULL;
/* Initialized to 0 just to avoid compiler warning message. Will
* never be used with this value. It is referenced only after it
......@@ -296,10 +298,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
* start the next pass when we find another first fragment.
*/
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = (struct sctp_ulpevent *) pos->cb;
ctsn = cevent->sndrcvinfo.sinfo_tsn;
switch (cevent->chunk_flags & SCTP_DATA_FRAG_MASK) {
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
first_frag = pos;
next_tsn = ctsn + 1;
......@@ -334,10 +336,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
sctp_ulpevent_t *retval = NULL;
struct sctp_ulpevent *retval = NULL;
/* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure.
......@@ -346,7 +348,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
*/
/* Check if this is part of a fragmented message. */
if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK))
return event;
sctp_ulpq_store_reasm(ulpq, event);
......@@ -359,7 +361,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
* ordered by an an incoming chunk.
*/
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
......@@ -373,7 +375,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
......@@ -394,16 +396,16 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
__skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(event->parent->list, pos);
__skb_queue_tail(sctp_event2skb(event)->list, pos);
}
}
/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
......@@ -415,7 +417,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
* stream ID and then by SSN.
*/
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
......@@ -427,13 +429,14 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->lobby))
__skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
__skb_insert(sctp_event2skb(event), pos->prev, pos,
&ulpq->lobby);
else
__skb_queue_tail(&ulpq->lobby, event->parent);
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
}
static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
......@@ -445,7 +448,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
*/
/* Check if this message needs ordering. */
if (SCTP_DATA_UNORDERED & event->chunk_flags)
if (SCTP_DATA_UNORDERED & event->msg_flags)
return event;
/* Note: The stream ID must be verified before this routine. */
......